diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp index b73c3977f6a3319866dd0954221ad23ae2b20151..d8412dbd4ddb4ec371649d180bce10a80dd624f3 100644 --- a/include/aidge/data/Data.hpp +++ b/include/aidge/data/Data.hpp @@ -47,14 +47,14 @@ enum class DataType { class Data { public: - constexpr Data(const char* type): mType(type) {}; - constexpr const char* type() const { + Data(const std::string& type): mType(type) {}; + constexpr const std::string& type() const { return mType; } virtual ~Data() = default; private: - const char* mType; + const std::string mType; }; } diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp index 3ccd55d3f19b3cff70c1a100d980ae63213261c5..95101bb3ad1704f4acb8dd3e46ef7ee450f1f91f 100644 --- a/include/aidge/data/Tensor.hpp +++ b/include/aidge/data/Tensor.hpp @@ -452,7 +452,7 @@ class Tensor : public Data, std::string toString() const; - inline void print() const { printf("%s\n", toString().c_str()); } + inline void print() const { fmt::print("{}\n", toString()); } std::shared_ptr<Tensor> grad() { if (!mGrad) { diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp index d6c80e800e310b5d6890a317773f67c08d346da0..f6d81b5781dd25c990f496fa9f592502c9705eba 100644 --- a/include/aidge/operator/FC.hpp +++ b/include/aidge/operator/FC.hpp @@ -70,7 +70,7 @@ public: void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final { assert(inputIdx < 3 && "operators supports only 3 inputs"); - assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type"); + assert(data->type() == Tensor::Type && "input data must be of Tensor type"); // TODO: FIXME: check this, because data dims may not be initialized at this point... //if (inputIdx == 2) { // assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template getAttr<FCAttr::NoBias>()) == false ? 
static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>()) : 0)); diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp index 49ff7b0bc3ead25ff5a01849be0112a8262f25cb..c315e671c2f084af869e3b21107066137496366b 100644 --- a/include/aidge/operator/GenericOperator.hpp +++ b/include/aidge/operator/GenericOperator.hpp @@ -102,8 +102,8 @@ public: ~GenericOperator_Op() = default; - void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { printf("setBackend: not available yet.\n"); } - void setDataType(const DataType& /*datatype*/) const override { printf("setDataType: not available yet.\n"); } + void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); } + void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); } }; /** diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp index 0fbb48339ab8eb085d1c16edc0fdf4116d9d69ce..c2e6eaff77971c3dcf350a02bc5089d08b5c8488 100644 --- a/include/aidge/operator/Identity.hpp +++ b/include/aidge/operator/Identity.hpp @@ -78,29 +78,19 @@ public: void backward() override final { } void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override final { - if (strcmp(data->type(), "Tensor") != 0) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{} Operator only accepts Tensors as outputs", type().c_str()); - } - if (outputIdx >= nbInputs()) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{} Operator has {} outputs", type().c_str(), nbInputs()); - } + AIDGE_ASSERT(data->type() == "Tensor", "{} Operator only accepts Tensors as outputs", type()); + AIDGE_ASSERT(outputIdx < nbInputs(), "{} Operator has {} outputs", type(), nbInputs()); *mInputs[outputIdx] = *std::dynamic_pointer_cast<Tensor>(data); } void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) override 
final { - if (strcmp(data->type(), "Tensor") != 0) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{} Operator only accepts Tensors as inputs", type().c_str()); - } - if (outputIdx >= nbInputs()) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{} Operator has {} outputs", type().c_str(), nbInputs()); - } + AIDGE_ASSERT(data->type() == "Tensor", "{} Operator only accepts Tensors as inputs", type()); + AIDGE_ASSERT(outputIdx < nbInputs(), "{} Operator has {} outputs", type(), nbInputs()); *mInputs[outputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data)); } const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const override final { - if (outputIdx >= nbInputs()) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{} Operator has {} outputs", type().c_str(), nbInputs()); - } + AIDGE_ASSERT(outputIdx < nbInputs(), "{} Operator has {} outputs", type(), nbInputs()); if (mInputs[outputIdx] == nullptr){ return mOutputs[outputIdx]; // Input is not initialized with empty tensor } diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp index d172af49fb85e054c02ac7d2c1ea1f0855b1264a..9290e79572a761cea930214f70003d88f8be43e0 100644 --- a/include/aidge/operator/MetaOperator.hpp +++ b/include/aidge/operator/MetaOperator.hpp @@ -63,7 +63,7 @@ public: } void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final { - assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type"); + AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type"); AIDGE_ASSERT(inputIdx < mGraph->getOrderedInputs().size(), "associateInput(): inputIdx ({}) out of bound for MetaOperator", inputIdx); const auto& inputOp = mGraph->getOrderedInputs()[inputIdx]; diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp index f0d6c29a5f39ecbd7e1b20c334368f64e745673a..0731498dd3e06541ed82a86a98c2ae0bb355f413 100644 --- a/include/aidge/operator/Producer.hpp 
+++ b/include/aidge/operator/Producer.hpp @@ -107,10 +107,10 @@ public: public: void forward() override final { - printf("Basic Producer forward() function.\n"); + fmt::print("Basic Producer forward() function.\n"); } void backward() override final { - printf("Basic Producer backward() function.\n"); + fmt::print("Basic Producer backward() function.\n"); } void setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) override { if (getAttr<ProdAttr::Constant>()) { diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp index a5d742fb52b3fe3a5fd9b4ac0d1c86c0aba0c1d0..4d604d520d3d8af532e196c7785896ddc1c242d0 100644 --- a/include/aidge/utils/Registrar.hpp +++ b/include/aidge/utils/Registrar.hpp @@ -57,7 +57,7 @@ struct Registrar { typedef typename C::registrar_type registrar_type; Registrar(const registrar_key& key, registrar_type func) { - //printf("REGISTRAR: %s\n", key.c_str()); + //fmt::print("REGISTRAR: {}\n", key); bool newInsert; std::tie(std::ignore, newInsert) = C::registry().insert(std::make_pair(key, func)); //assert(newInsert && "registrar already exists"); diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp index 02789bd857be16e6892cb7486003530665b67495..6bf59155373cf73d158fce4eb5bda58f7d279e69 100644 --- a/include/aidge/utils/StaticAttributes.hpp +++ b/include/aidge/utils/StaticAttributes.hpp @@ -88,9 +88,9 @@ public: // Runtime access with name template <typename R> - R& getAttr(const char* name) { + R& getAttr(const std::string& name) { for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) { - if (strcmp(EnumStrings<ATTRS_ENUM>::data[i], name) == 0) { + if (name == EnumStrings<ATTRS_ENUM>::data[i]) { return getAttr<R>(i); } } @@ -99,9 +99,9 @@ public: } template <typename R> - const R& getAttr(const char* name) const { + const R& getAttr(const std::string& name) const { for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) { - 
if (strcmp(EnumStrings<ATTRS_ENUM>::data[i], name) == 0) { + if (name == EnumStrings<ATTRS_ENUM>::data[i]) { return getAttr<R>(i); } } @@ -190,7 +190,7 @@ public: } } - AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"{}\" not found", name.c_str()); + AIDGE_THROW_OR_ABORT(std::runtime_error, "attribute \"{}\" not found", name); } std::set<std::string> getAttrsName() const override final { @@ -227,7 +227,7 @@ public: } } - AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name.c_str()); + AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name); } @@ -242,7 +242,7 @@ public: return; } } - AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name.c_str()); + AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name); } #endif diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp index 3e9f015250acda34f0ae55af38f67df3ca4ad180..df3792fd784a2ef2b9418628959629ac59c04094 100644 --- a/python_binding/data/pybind_Data.cpp +++ b/python_binding/data/pybind_Data.cpp @@ -30,7 +30,7 @@ void init_Data(py::module& m){ ; py::class_<Data, std::shared_ptr<Data>>(m,"Data") - .def(py::init<const char*>()); + .def(py::init<const std::string&>()); } diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp index 85eda8f663146d106e11a21eb0751d456b06aef3..93389edf663a6154daf0b9ef2a7cc4095abc4d0f 100644 --- a/python_binding/data/pybind_Tensor.cpp +++ b/python_binding/data/pybind_Tensor.cpp @@ -46,7 +46,7 @@ void addCtor(py::class_<Tensor, newTensor->setBackend(backend); newTensor->getImpl()->copyFromHost(static_cast<T*>(info.ptr), newTensor->size()); }else{ - AIDGE_THROW_OR_ABORT(py::value_error, "Could not find backend {}, verify you have `import aidge_backend_{}`.\n", backend.c_str(), backend.c_str()); + AIDGE_THROW_OR_ABORT(py::value_error, "Could not find backend {}, verify you have `import aidge_backend_{}`.\n", backend, backend); } return newTensor; 
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp index dd94f8cdc8bd34bdaa48d9c4669dbb8d00caf902..0be4104afcf68d4282637eec714ce4e4cfcd37ab 100644 --- a/src/graph/GraphView.cpp +++ b/src/graph/GraphView.cpp @@ -61,10 +61,16 @@ std::string Aidge::GraphView::name() const { return mName; } void Aidge::GraphView::setName(const std::string &name) { mName = name; } void Aidge::GraphView::save(std::string path, bool verbose, bool showProducers) const { - FILE *fp = std::fopen((path + ".mmd").c_str(), "w"); - std::fprintf(fp, - "%%%%{init: {'flowchart': { 'curve': 'monotoneY'}, " - "'fontFamily': 'Verdana' } }%%%%\nflowchart TB\n\n"); + auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((path + ".mmd").c_str(), "w"), &std::fclose); + + if (!fp) { + AIDGE_THROW_OR_ABORT(std::runtime_error, + "Could not create graph view log file: {}", path + ".mmd"); + } + + fmt::print(fp.get(), + "%%{{init: {{'flowchart': {{ 'curve': 'monotoneY'}}, " + "'fontFamily': 'Verdana' }} }}%%\nflowchart TB\n\n"); // Start by creating every node const auto namePtrTable = getRankedNodesName("{3}"); @@ -100,8 +106,8 @@ void Aidge::GraphView::save(std::string path, bool verbose, bool showProducers) } if (node_ptr == mRootNode || node_ptr->type() != "Producer" || showProducers) { - std::fprintf(fp, "%s_%s(%s)%s\n", node_ptr->type().c_str(), namePtrTable.at(node_ptr).c_str(), - givenName.c_str(), nodeCls.c_str()); + fmt::print(fp.get(), "{}_{}({}){}\n", node_ptr->type(), namePtrTable.at(node_ptr), + givenName, nodeCls); } } @@ -125,12 +131,12 @@ void Aidge::GraphView::save(std::string path, bool verbose, bool showProducers) } if (mNodes.find(child) != mNodes.end()) { - std::fprintf(fp, "%s_%s-->|\"%u%s→%u\"|%s_%s\n", node_ptr->type().c_str(), namePtrTable.at(node_ptr).c_str(), - outputIdx, dims.c_str(), inputIdx, child->type().c_str(), namePtrTable.at(child).c_str()); + fmt::print(fp.get(), "{}_{}-->|\"{}{}→{}\"|{}_{}\n", node_ptr->type(), namePtrTable.at(node_ptr), + 
outputIdx, dims, inputIdx, child->type(), namePtrTable.at(child)); } else if (verbose) { - std::fprintf(fp, "%s_%s-->|\"%u%s→%u\"|%p:::externalCls\n", node_ptr->type().c_str(), namePtrTable.at(node_ptr).c_str(), - outputIdx, dims.c_str(), inputIdx, static_cast<void*>(child.get())); + fmt::print(fp.get(), "{}_{}-->|\"{}{}→{}\"|{}:::externalCls\n", node_ptr->type(), namePtrTable.at(node_ptr), + outputIdx, dims, inputIdx, static_cast<void*>(child.get())); } break; } @@ -145,11 +151,11 @@ void Aidge::GraphView::save(std::string path, bool verbose, bool showProducers) size_t inputIdx = 0; for (auto input : mInputNodes) { if (input.first != nullptr) { - std::fprintf(fp, "input%lu((in#%lu)):::inputCls--->|→%u|%s_%s\n", inputIdx, inputIdx, - input.second, input.first->type().c_str(), namePtrTable.at(input.first).c_str()); + fmt::print(fp.get(), "input{}((in#{})):::inputCls--->|→{}|{}_{}\n", inputIdx, inputIdx, + input.second, input.first->type(), namePtrTable.at(input.first)); } else { - std::fprintf(fp, "input%lu((in#%lu)):::inputCls\n", inputIdx, inputIdx); + fmt::print(fp.get(), "input{}((in#{})):::inputCls\n", inputIdx, inputIdx); } ++inputIdx; } @@ -164,28 +170,27 @@ void Aidge::GraphView::save(std::string path, bool verbose, bool showProducers) dims += " " + fmt::format("{}", op->getOutput(output.second)->dims()); } - std::fprintf(fp, "%s_%s--->|\"%u%s→\"|output%lu((out#%lu)):::outputCls\n", - output.first->type().c_str(), namePtrTable.at(output.first).c_str(), output.second, - dims.c_str(), outputIdx, outputIdx); + fmt::print(fp.get(), "{}_{}--->|\"{}{}→\"|output{}((out#{})):::outputCls\n", + output.first->type(), namePtrTable.at(output.first), output.second, + dims, outputIdx, outputIdx); } else { - std::fprintf(fp, "output%lu((out#%lu)):::outputCls\n", outputIdx, outputIdx); + fmt::print(fp.get(), "output{}((out#{})):::outputCls\n", outputIdx, outputIdx); } ++outputIdx; } - std::fprintf(fp, "classDef inputCls fill:#afa\n"); - std::fprintf(fp, "classDef outputCls 
fill:#ffa\n"); - std::fprintf(fp, "classDef externalCls fill:#ccc\n"); - std::fprintf(fp, "classDef producerCls fill:#ccf\n"); - std::fprintf(fp, "classDef genericCls fill:#f9f9ff,stroke-width:1px,stroke-dasharray: 5 5\n"); - std::fprintf(fp, "classDef metaCls stroke-width:5px\n"); - std::fprintf(fp, "classDef rootCls stroke:#f00\n"); - std::fprintf(fp, "classDef producerCls_rootCls stroke:#f00,fill:#ccf\n"); - std::fprintf(fp, "classDef genericCls_rootCls stroke:#f00,fill:#f9f9ff,stroke-width:1px,stroke-dasharray: 5 5\n"); - std::fprintf(fp, "classDef metaCls_rootCls stroke:#f00,stroke-width:5px\n"); - std::fprintf(fp, "\n"); - std::fclose(fp); + fmt::print(fp.get(), "classDef inputCls fill:#afa\n"); + fmt::print(fp.get(), "classDef outputCls fill:#ffa\n"); + fmt::print(fp.get(), "classDef externalCls fill:#ccc\n"); + fmt::print(fp.get(), "classDef producerCls fill:#ccf\n"); + fmt::print(fp.get(), "classDef genericCls fill:#f9f9ff,stroke-width:1px,stroke-dasharray: 5 5\n"); + fmt::print(fp.get(), "classDef metaCls stroke-width:5px\n"); + fmt::print(fp.get(), "classDef rootCls stroke:#f00\n"); + fmt::print(fp.get(), "classDef producerCls_rootCls stroke:#f00,fill:#ccf\n"); + fmt::print(fp.get(), "classDef genericCls_rootCls stroke:#f00,fill:#f9f9ff,stroke-width:1px,stroke-dasharray: 5 5\n"); + fmt::print(fp.get(), "classDef metaCls_rootCls stroke:#f00,stroke-width:5px\n"); + fmt::print(fp.get(), "\n"); } /////////////////////////////////////////////////////// @@ -448,7 +453,7 @@ Aidge::GraphView::outputs(std::string nodeName) const { void Aidge::GraphView::setInputId(Aidge::IOIndex_t /*inID*/, Aidge::IOIndex_t /*newNodeOutID*/) { - printf("Not implemented yet.\n"); + fmt::print("Not implemented yet.\n"); } void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnableParam) { @@ -705,7 +710,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getParents() const { std::vector<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getParents(const 
std::string nodeName) const { std::map<std::string, std::shared_ptr<Node>>::const_iterator it = mNodeRegistry.find(nodeName); if (it == mNodeRegistry.end()) { - printf("No such node a %s in %s graph.\n", nodeName.c_str(), name().c_str()); + fmt::print("No such node a {} in {} graph.\n", nodeName, name()); exit(-1); } return (it->second)->getParents(); @@ -734,8 +739,7 @@ Aidge::GraphView::getChildren(const std::string nodeName) const { std::map<std::string, std::shared_ptr<Node>>::const_iterator it = mNodeRegistry.find(nodeName); if (it == mNodeRegistry.end()) { - printf("No such node a %s in %s graph.\n", nodeName.c_str(), - name().c_str()); + fmt::print("No such node a {} in {} graph.\n", nodeName, name()); exit(-1); } return (it->second)->getOrderedChildren(); @@ -745,7 +749,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getChildren(const std::shared_ptr<Node> otherNode) const { std::set<std::shared_ptr<Node>>::const_iterator it = mNodes.find(otherNode); if (it == mNodes.end()) { - printf("No such node in graph.\n"); + fmt::print("No such node in graph.\n"); exit(-1); } return (*it)->getChildren(); @@ -759,7 +763,7 @@ Aidge::GraphView::getNode(const std::string& nodeName) const { if (it != mNodeRegistry.cend()) { return it->second; } else { - printf("No Node named %s in the current GraphView.\n", nodeName.c_str()); + fmt::print("No Node named {} in the current GraphView.\n", nodeName); return nullptr; } } @@ -808,13 +812,13 @@ void Aidge::GraphView::remove(std::shared_ptr<Node> nodePtr, bool includeLearnab bool Aidge::GraphView::swap(Node & /*node*/, Node & /*otherNode*/) { - printf("Swap() not implementated yet. Return false.\n"); + fmt::print("Swap() not implementated yet. 
Return false.\n"); return false; } void Aidge::GraphView::link(std::string /*name1_inID*/, std::string /*name2_outID*/) { - printf("Not implemented yet.\n"); + fmt::print("Not implemented yet.\n"); } void Aidge::GraphView::insertParent(NodePtr childNode, diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp index 33f8f1838bb4418e945d7bb6b2b9be957a6aca10..cbaa4e59eeeb445f5e29f0178c001e0942b3df63 100644 --- a/src/graph/Node.cpp +++ b/src/graph/Node.cpp @@ -171,7 +171,7 @@ Aidge::IOIndex_t Aidge::Node::nbValidOutputs() const { void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeoutId) { assert(inId != gk_IODefaultIndex && (inId < nbInputs()) && "Must be a valid index"); if (mIdOutParents[inId] != gk_IODefaultIndex) { - std::printf("Warning: filling a Tensor already attributed\n"); + fmt::print("Warning: filling a Tensor already attributed\n"); auto originalParent = input(inId); // remove original parent reference to child // find the output ID for original Parent @@ -190,7 +190,7 @@ void Aidge::Node::addChildOp(std::shared_ptr<Node> otherNode, const IOIndex_t ou assert((otherInId < otherNode->nbInputs()) && "Input index out of bound."); assert((outId < nbOutputs()) && "Output index out of bound."); if (otherNode->input(otherInId).second != gk_IODefaultIndex) { - std::printf("Warning, the %d-th Parent of the child node already existed.\n", otherInId); + fmt::print("Warning, the {}-th Parent of the child node already existed.\n", otherInId); } // manage tensors and potential previous parent otherNode->setInputId(otherInId, outId); @@ -208,7 +208,7 @@ void Aidge::Node::addChildView(std::shared_ptr<GraphView> otherGraph, const IOIn assert((outId < nbOutputs()) && "Output index out of bound."); std::set<std::shared_ptr<Node>> inNodes = otherGraph->inputNodes(); if (inNodes.size() == std::size_t(0)) { // no input Node - printf("Cannot add GraphView to the Node. No input node detected.\n"); + fmt::print("Cannot add GraphView to the Node. 
No input node detected.\n"); } else // inNodes.size() >= 1 { assert((inNodes.find(otherInId.first) != @@ -242,7 +242,7 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOIndex_t inId) { if (getParent(inId) != nullptr) { - printf("Warning, you're replacing a Parent.\n"); + fmt::print("Warning, you're replacing a Parent.\n"); } assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Input index out of bound."); mParents[inId] = other_node; diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp index 7560daf0ed09d79ed1c6655686aa0216542ae26d..c0ada265410f9bc46aab3b43fae270f1e74dd5eb 100644 --- a/src/operator/OperatorTensor.cpp +++ b/src/operator/OperatorTensor.cpp @@ -20,19 +20,13 @@ void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) { - if (inputIdx >= nbInputs()) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{} Operator has {} inputs", type().c_str(), nbInputs()); - } - if (strcmp((data)->type(), Tensor::Type) != 0) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "Input data must be of Tensor type"); - } + AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs()); + AIDGE_ASSERT(data->type() == Tensor::Type, "Input data must be of Tensor type"); mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data); } void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) { - if (strcmp(data->type(), "Tensor") != 0) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{} Operator only accepts Tensors as inputs", type().c_str()); - } + AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type()); if (getInput(inputIdx)) { *mInputs[inputIdx] = *std::dynamic_pointer_cast<Tensor>(data); } else { @@ -43,9 +37,7 @@ void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t 
inputIdx, const std: Aidge::OperatorTensor::~OperatorTensor() = default; void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, std::shared_ptr<Aidge::Data>&& data) { - if (strcmp(data->type(), "Tensor") != 0) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{} Operator only accepts Tensors as inputs", type().c_str()); - } + AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type()); if (getInput(inputIdx)) { *mInputs[inputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data)); } else { @@ -54,36 +46,24 @@ void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, std::share } const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getInput(const Aidge::IOIndex_t inputIdx) const { - if (inputIdx >= nbInputs()) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{} Operator has {} inputs", type().c_str(), nbInputs()); - } + AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs()); return mInputs[inputIdx]; } void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) { - if (strcmp(data->type(), "Tensor") != 0) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{} Operator only accepts Tensors as inputs", type().c_str()); - } - if (outputIdx >= nbOutputs()) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{} Operator has {} outputs", type().c_str(), nbOutputs()); - } + AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type()); + AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs()); *mOutputs[outputIdx] = *std::dynamic_pointer_cast<Tensor>(data); } void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) { - if (strcmp(data->type(), "Tensor") != 0) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{} Operator only accepts Tensors as inputs", type().c_str()); - } - if (outputIdx >= nbOutputs()) { - 
AIDGE_THROW_OR_ABORT(std::runtime_error, "{} Operator has {} outputs", type().c_str(), nbOutputs()); - } + AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type()); + AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs()); *mOutputs[outputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data)); } const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aidge::IOIndex_t outputIdx) const { - if (outputIdx >= nbOutputs()) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{} Operator has {} outputs", type().c_str(), nbOutputs()); - } + AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs()); return mOutputs[outputIdx]; } diff --git a/src/recipies/FuseBatchNorm.cpp b/src/recipies/FuseBatchNorm.cpp index 2fb017567550ada083d0d79d0323b0b45998026f..247de86fdac6366aa8ac963ac65c55c3c30e57a0 100644 --- a/src/recipies/FuseBatchNorm.cpp +++ b/src/recipies/FuseBatchNorm.cpp @@ -89,13 +89,13 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode, meanVariance += b_var.get<float>(outChId); ++count; } else { - printf("Zero-variance: %s [%lu]\n", convNode->name().c_str(), outChId); + fmt::print("Zero-variance: {} [{}]\n", convNode->name(), outChId); } } if (count > 0) meanVariance /= count; else { - printf("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?\n"); + fmt::print("Warning: variance < 1e-12 for all outputs! 
Is the network correctly trained?\n"); } std::shared_ptr<Tensor> weightBuf, biasBuf; @@ -172,7 +172,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::MatchSolution> solution) { void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::GraphView> graphView) { std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>(); regex->setNodeKey("BatchNorm", "getType($) =='BatchNorm'"); - printf("\n============================\nSearching for solutions\n==============================\n"); + fmt::print("\n============================\nSearching for solutions\n==============================\n"); regex->setNodeKey( "OP", "getType($) =='Conv' || getType($) =='ConvDepthWise' || getType($) =='PaddedConv' || getType($) =='PaddedConvDepthWise'"); diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp index 7562944a98865c21296396473a0b39ec0b591dbc..d370e06a812b5d6bed139e98e617107e21938411 100644 --- a/src/scheduler/Scheduler.cpp +++ b/src/scheduler/Scheduler.cpp @@ -17,6 +17,7 @@ #include <string> #include <fmt/ranges.h> +#include <fmt/color.h> #include "aidge/graph/GraphView.hpp" #include "aidge/graph/Node.hpp" @@ -35,7 +36,7 @@ void drawProgressBar(double progress, int barWidth, const std::string& additiona else putchar(' '); } - printf("] %d%% | %s\r", static_cast<int>(progress * 100), additionalInfo.c_str()); + fmt::print("] {}% | {}\r", static_cast<int>(progress * 100), additionalInfo); fflush(stdout); } @@ -79,18 +80,15 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { // If the prior node is of another type, it replaces the initial consumer // in the new priorConsumers list. The initial consumer will become // again a consumer later, by construction. 
- if (verbose) printf("List of consumers with their priors:\n"); + if (verbose) fmt::print("List of consumers with their priors:\n"); std::set<std::shared_ptr<Node>> requiredProducers; std::set<std::shared_ptr<Node>> priorConsumers; for (const auto& consumer : consumers) { if (verbose) { - printf("\t- consumer: " - "\x1b[1;37m" - "%s" - "\x1b[0m" - "\n", - namePtrTable[consumer].c_str()); + fmt::print("\t- consumer: "); + fmt::print(fg(fmt::color::orange), "{}", namePtrTable[consumer]); + fmt::print("\n"); } const auto& prior = getPriorProducersConsumers(consumer); @@ -100,13 +98,13 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { std::vector<std::string> requiredProducersName; std::transform(prior.requiredProducers.begin(), prior.requiredProducers.end(), std::back_inserter(requiredProducersName), - [&namePtrTable](auto val){ return namePtrTable[val].c_str(); }); + [&namePtrTable](auto val){ return namePtrTable[val]; }); fmt::print("\t\trequired producers: {}\n", requiredProducersName); std::vector<std::string> priorConsumersName; std::transform(prior.priorConsumers.begin(), prior.priorConsumers.end(), std::back_inserter(priorConsumersName), - [&namePtrTable](auto val){ return namePtrTable[val].c_str(); }); + [&namePtrTable](auto val){ return namePtrTable[val]; }); fmt::print("\t\tprior consumers: {}\n", priorConsumersName); } @@ -136,27 +134,24 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { // runnable because some may depend on the execution of others (when // there is multiple successive priors for example). 
std::set<std::shared_ptr<Node>> runnableConsumers; - if (verbose) printf("Updated list of consumers:\n"); + if (verbose) fmt::print("Updated list of consumers:\n"); for (const auto& consumer : consumers) { if (verbose) { - printf("\t- consumer: " - "\x1b[1;37m" - "%s" - "\x1b[0m" - "\n\t\tC/R:\t", - namePtrTable[consumer].c_str()); + fmt::print("\t- consumer: "); + fmt::print(fg(fmt::color::orange), "{}", namePtrTable[consumer]); + fmt::print("\n\t\tC/R:\t"); for (IOIndex_t inId = 0; inId < consumer->nbInputs() - 1; ++inId) { - printf("%zu/%zu\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId), + fmt::print("{}/{}\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId), consumer->getOperator()->getNbRequiredData(inId)); } - printf("%zu/%zu", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1), + fmt::print("{}/{}", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1), consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1)); - printf("\n\t\tP:\t"); + fmt::print("\n\t\tP:\t"); for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) { - printf("%zu\n\t\t\t", consumer->getOperator()->getNbProducedData(outId)); + fmt::print("{}\n\t\t\t", consumer->getOperator()->getNbProducedData(outId)); } - printf("%zu", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1)); - printf("\n"); + fmt::print("{}", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1)); + fmt::print("\n"); } bool isRunnable = true; @@ -164,7 +159,7 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { if (/*consumer->getOperator()->getNbRequiredData(inputIdx) > 0 && */(consumer->getOperator()->getNbConsumedData(inputIdx) + consumer->getOperator()->getNbRequiredData(inputIdx)) > getNbAvailableData(consumer, inputIdx)) { - if (verbose) printf(" not runnable: C%zu + R%zu > P%zu 
for input #%d\n", + if (verbose) fmt::print(" not runnable: C{} + R{} > P{} for input #{}\n", consumer->getOperator()->getNbConsumedData(inputIdx), consumer->getOperator()->getNbRequiredData(inputIdx), getNbAvailableData(consumer, inputIdx), inputIdx); @@ -182,7 +177,7 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { // 5) If not consumer is runnable, it is a stop condition! if (runnableConsumers.empty()) { - if (verbose) printf("********************\n"); + if (verbose) fmt::print("********************\n"); // No consumer is runnable: some required data is missing for all of // them. There is two possibilities: // - At least one required data source is exhausted, which may be @@ -197,29 +192,29 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { // At this point, simultaneously runnable consumers have no data // dependency and could be run in parallel! for (const auto& runnable : runnableConsumers) { - if (verbose) printf("Runnable: %s\n", namePtrTable[runnable].c_str()); + if (verbose) fmt::print("Runnable: {}\n", namePtrTable[runnable]); runnable->getOperator()->updateConsummerProducer(); mStaticSchedule.back().push_back(runnable); } // 7) Update consumers list - if (verbose) printf("Updating producer and consumer lists...\n"); + if (verbose) fmt::print("Updating producer and consumer lists...\n"); for (const auto& consumer : runnableConsumers) { if (verbose) { - printf("\t- consumer: %s\n\t\tC/R:\t", - namePtrTable[consumer].c_str()); + fmt::print("\t- consumer: {}\n\t\tC/R:\t", + namePtrTable[consumer]); for (IOIndex_t inId = 0; inId < consumer->nbInputs() - 1; ++inId) { - printf("%ld/%ld\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId), + fmt::print("{}/{}\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId), consumer->getOperator()->getNbRequiredData(inId)); } - printf("%zu/%zu", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1), + fmt::print("{}/{}", 
consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1), consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1)); - printf("\n\t\tP:\t"); + fmt::print("\n\t\tP:\t"); for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) { - printf("%zu\n\t\t\t", consumer->getOperator()->getNbProducedData(outId)); + fmt::print("{}\n\t\t\t", consumer->getOperator()->getNbProducedData(outId)); } - printf("%zu", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1)); - printf("\n"); + fmt::print("{}", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1)); + fmt::print("\n"); } // 7.1) If the current consumer has still data to consume, it will @@ -229,7 +224,7 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbInputs(); ++inputIdx) { if (consumer->getOperator()->getNbConsumedData(inputIdx) < getNbAvailableData(consumer, inputIdx)) { - if (verbose) printf(" still consumer: C%zu < P%zu for input #%d\n", + if (verbose) fmt::print(" still consumer: C{} < P{} for input #{}\n", consumer->getOperator()->getNbConsumedData(inputIdx), getNbAvailableData(consumer, inputIdx), inputIdx); @@ -258,7 +253,7 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { } /* if (consumer->getOperator()->getNbProducedData(outId) > 0) { - if (verbose) printf(" also producer\n"); + if (verbose) fmt::print(" also producer\n"); // make sure consumer is also a producer producers.insert(consumer); @@ -272,7 +267,7 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { consumers.erase(consumer); if (isProducer) { - if (verbose) printf(" also producer\n"); + if (verbose) fmt::print(" also producer\n"); // make sure consumer is also a producer producers.insert(consumer); @@ -295,13 +290,13 @@ void Aidge::SequentialScheduler::generateScheduling(bool 
verbose) { stillConsumers.clear(); } - if (verbose) printf("********************\n"); + if (verbose) fmt::print("********************\n"); } while (!consumers.empty()); if (verbose) { if (!consumers.empty()) { - printf("/!\\ Remaining consumers: possible dead-lock\n"); - printf("********************\n"); + fmt::print("/!\\ Remaining consumers: possible dead-lock\n"); + fmt::print("********************\n"); } } } @@ -452,8 +447,7 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::ve size_t cpt = 0; for (const auto& runnable : mStaticSchedule.at(mStaticScheduleStep)) { if (verbose) - printf("run: %s\n", - namePtrTable[runnable].c_str()); + fmt::print("run: {}\n", namePtrTable[runnable]); else drawProgressBar(static_cast<float>(cpt) / static_cast<float>(mStaticSchedule.size()), 50, (std::string("running ") + namePtrTable[runnable])); @@ -464,14 +458,20 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::ve cpt++; } if (!verbose) drawProgressBar(1.0, 50, " "); - printf("\n"); + fmt::print("\n"); ++mStaticScheduleStep; } void Aidge::SequentialScheduler::saveSchedulingDiagram(const std::string& fileName) const { auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((fileName + ".mmd").c_str(), "w"), &std::fclose); - std::fprintf(fp.get(), "gantt\ndateFormat x\naxisFormat %%Q µs\n\n"); + + if (!fp) { + AIDGE_THROW_OR_ABORT(std::runtime_error, + "Could not create scheduling diagram log file: {}", fileName + ".mmd"); + } + + fmt::print(fp.get(), "gantt\ndateFormat x\naxisFormat %Q µs\n\n"); if (!mScheduling.empty()) { const std::map<std::shared_ptr<Node>, std::string> namePtrTable @@ -479,14 +479,14 @@ void Aidge::SequentialScheduler::saveSchedulingDiagram(const std::string& fileNa const auto globalStart = mScheduling[0].start; for (const auto& element : mScheduling) { - std::fprintf(fp.get(), "%s :%ld, %ld\n", - namePtrTable.at(element.node).c_str(), + fmt::print(fp.get(), "{} :{}, {}\n", + 
namePtrTable.at(element.node), std::chrono::duration_cast<std::chrono::microseconds>(element.start - globalStart).count(), std::chrono::duration_cast<std::chrono::microseconds>(element.end - globalStart).count()); } } - std::fprintf(fp.get(), "\n"); + fmt::print(fp.get(), "\n"); } std::set<std::shared_ptr<Aidge::Node>> Aidge::SequentialScheduler::getConsumers( diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp index ebbfb3ad89721eb4f1390c3efca475acbb0b6f46..437780b959b37e0cf6b5b7796e71c9b931f25bc0 100644 --- a/unit_tests/graph/Test_GraphView.cpp +++ b/unit_tests/graph/Test_GraphView.cpp @@ -74,7 +74,7 @@ TEST_CASE("genRandomGraph", "[GraphView][randomGen]") { } } - printf("nbUnicity = %zu/%zu\n", nbUnicity, nbTests); + fmt::print("nbUnicity = {}/{}\n", nbUnicity, nbTests); } TEST_CASE("clone", "[GraphView][clone]") { @@ -147,7 +147,7 @@ TEST_CASE("clone_with_delete", "[GraphView][cloneDelete]") { ++seed; } - printf("nbClonedWithDelete = %zu/%zu\n", nbClonedWithDelete, nbTests); + fmt::print("nbClonedWithDelete = {}/{}\n", nbClonedWithDelete, nbTests); } TEST_CASE("remove", "[GraphView][remove]") { @@ -205,7 +205,7 @@ TEST_CASE("remove", "[GraphView][remove]") { } } - printf("nbTested = %zu/%zu\n", nbTested, nbTests); + fmt::print("nbTested = {}/{}\n", nbTested, nbTests); } TEST_CASE("[core/graph] GraphView(Constructor)", "[GraphView][constructor()]") { @@ -381,7 +381,7 @@ TEST_CASE("[core/graph] GraphView(save)") { g1->addChild(conv5, "c4", 0, 0); g1->save("./graphExample"); - printf("File saved in ./graphExample.md\n"); + fmt::print("File saved in ./graphExample.md\n"); } TEST_CASE("[core/graph] GraphView(resetConnections)") { diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp index 2932514ca3a51c1c74eb583133b9a0d3557e8b3a..7e28f1fadc56855d266c1e8547261f5903f8c724 100644 --- a/unit_tests/scheduler/Test_Scheduler.cpp +++ b/unit_tests/scheduler/Test_Scheduler.cpp @@ -66,12 +66,12 @@ 
TEST_CASE("randomScheduling", "[Scheduler][randomGen]") { std::vector<std::string> nodesName; std::transform(sch.begin(), sch.end(), std::back_inserter(nodesName), - [&namePtrTable](auto val){ return namePtrTable.at(val).c_str(); }); + [&namePtrTable](auto val){ return namePtrTable.at(val); }); fmt::print("schedule: {}\n", nodesName); REQUIRE(sch.size() == 10 + orderedInputs.size()); } } - printf("nbUnicity = %zu/%zu\n", nbUnicity, nbTests); + fmt::print("nbUnicity = {}/{}\n", nbUnicity, nbTests); }