From 7582bff4b15dadbff73cdb32276f4d2dd040bafc Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Wed, 7 Aug 2024 12:43:24 +0000
Subject: [PATCH] Remove more inline warnings from Log and Tensor, fix metaop
 names

---
 include/aidge/data/Tensor.hpp                 | 17 +------
 include/aidge/operator/Operator.hpp           |  4 +-
 include/aidge/utils/Log.hpp                   | 14 ++---
 include/aidge/utils/Registrar.hpp             |  8 ++-
 src/data/Tensor.cpp                           | 51 +++++++++----------
 src/operator/MetaOperatorDefs/PaddedConv.cpp  | 21 +++++---
 .../MetaOperatorDefs/PaddedConvDepthWise.cpp  | 19 ++++---
 7 files changed, 62 insertions(+), 72 deletions(-)

diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index b4c5de2eb..3ee64ceca 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -346,22 +346,7 @@ public:
      * @param copyFrom If true (default), move data from previous backend/device
      * to the new one. Previous data is lost otherwise.
      */
-    inline void setBackend(const std::string &name, DeviceIdx_t device = 0, bool copyFrom = true) {
-        if (mImpl) {
-            if (mImpl->device() != std::make_pair(name, device)) {
-                // Backend change: create new impl, copy from old to new and replace
-                // impl
-                std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
-                if (copyFrom) {
-                    newImpl->copyFrom(*mImpl, mImpl->size(), mImplOffset, 0);
-                }
-                setImpl(newImpl);
-            }
-        }
-        else {
-            mImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
-        }
-    }
+    void setBackend(const std::string &name, DeviceIdx_t device = 0, bool copyFrom = true);
 
     /**
      * @brief Get a list of available backends.
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index f1e25b7a1..c938fc362 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -196,8 +196,8 @@ public:
     }
 
     inline InputCategory inputCategory(IOIndex_t idx) const {
-        AIDGE_ASSERT(idx < mInputsCategory.size(), "Input #{} out of range (number of inputs is {})", idx, mInputsCategory.size());
-        return mInputsCategory[idx];
+        // AIDGE_ASSERT(idx < mInputsCategory.size(), "Input #{} out of range (number of inputs is {})", idx, mInputsCategory.size());
+        return mInputsCategory.at(idx);
     }
 
     virtual inline bool isAtomic() const noexcept { return true; }
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index f198e83fb..6b2ace1c6 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -74,7 +74,7 @@ public:
      * inducing no runtime overhead for Release.
      */
     template <typename... Args>
-    constexpr static void debug(Args&&... args) {
+    static void debug(Args&&... args) {
 #ifndef NDEBUG
         // only when compiled in Debug
         log(Debug, fmt::format(std::forward<Args>(args)...));
@@ -90,7 +90,7 @@
      * performed nominally.
      */
     template <typename... Args>
-    constexpr static void info(Args&&... args) {
+    static void info(Args&&... args) {
         log(Info, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -101,7 +101,7 @@
      * performed normally.
      */
     template <typename... Args>
-    constexpr static void notice(Args&&... args) {
+    static void notice(Args&&... args) {
         log(Notice, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -112,7 +112,7 @@
      * still provide an exploitable result.
      */
     template <typename... Args>
-    constexpr static void warn(Args&&... args) {
+    static void warn(Args&&... args) {
         log(Warn, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -123,7 +123,7 @@
      * further operations.
      */
     template <typename... Args>
-    constexpr static void error(Args&&... args) {
+    static void error(Args&&... args) {
         log(Error, fmt::format(std::forward<Args>(args)...));
     }
 
@@ -134,14 +134,14 @@
      * impossible.
      */
     template <typename... Args>
-    constexpr static void fatal(Args&&... args) {
+    static void fatal(Args&&... args) {
         log(Fatal, fmt::format(std::forward<Args>(args)...));
     }
 
     /**
      * Set the minimum log level displayed in the console.
      */
-    constexpr static void setConsoleLevel(Level level) {
+    static void setConsoleLevel(Level level) {
         mConsoleLevel = level;
     }
 
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 47bb05ce7..872c3f6b5 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -75,11 +75,9 @@ struct Registrar {
         return (C::registry().find(key) != C::registry().cend());
     }
 
-    static auto create(const registrar_key& key){
-        const auto it = C::registry().find(key);
-        AIDGE_ASSERT(it != C::registry().cend(), "missing or invalid registrar key: {} for registrable object {}\nDid you include/import the corresponding module?\nIf so, it is possible that the object is not yet supported.", key, typeid(C).name());
-
-        return (*it).second;
+    static auto create(const registrar_key& key) {
+        AIDGE_ASSERT(exists(key), "missing or invalid registrar key: {} for registrable object {}\nDid you include/import the corresponding module?\nIf so, it is possible that the object is not yet supported.", key, typeid(C).name());
+        return C::registry()[key];
     }
     static std::vector<registrar_key> getKeys(){
         std::vector<registrar_key> keys;
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index d1bf32594..20bf3fb78 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -25,15 +25,9 @@
 
 #include "aidge/utils/Types.h"
 
-/**
- * @brief Element-wise addition operation for two ``Tensor``s.
- * @note ``Tensor``s should be stored on the same backend.
- * @todo If input ``Tensor``s have a different dataType, the output should
- * have the dataType of the ``Tensor`` with the highest precision.
- *
- * @param other
- * @return Tensor
- */
+Aidge::Tensor::~Tensor() noexcept = default;
+
+
 Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
@@ -50,15 +44,7 @@ Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
     return add_.getOutput(0)->clone();
 }
 
-/**
- * @brief Element-wise substraction operation for two ``Tensor``s.
- * @note ``Tensor``s should be stored on the same backend.
- * @todo If input ``Tensor``s have a different dataType, the output should
- * have the dataType of the ``Tensor`` with the highest precision.
- *
- * @param other
- * @return Tensor
- */
+
 Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
@@ -75,15 +61,7 @@ Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
     return sub_.getOutput(0)->clone();
 }
 
-/**
- * @brief Element-wise multiplication operation for two ``Tensor``s.
- * @note ``Tensor``s should be stored on the same backend.
- * @todo If input ``Tensor``s have a different dataType, the output should
- * have the dataType of the ``Tensor`` with the highest precision.
- *
- * @param other
- * @return Tensor
- */
+
 Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
@@ -100,6 +78,7 @@ Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
     return mul_.getOutput(0)->clone();
 }
 
+
 Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
     AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
@@ -146,7 +125,23 @@ Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
     return *this;
 }
 
-Aidge::Tensor::~Tensor() noexcept = default;
+
+void Aidge::Tensor::setBackend(const std::string &name, Aidge::DeviceIdx_t device, bool copyFrom) {
+    if (mImpl) {
+        if (mImpl->device() != std::make_pair(name, device)) {
+            // Backend change: create new impl, copy from old to new and replace
+            // impl
+            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
+            if (copyFrom) {
+                newImpl->copyFrom(*mImpl, mImpl->size(), mImplOffset, 0);
+            }
+            setImpl(newImpl);
+        }
+    }
+    else {
+        mImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
+    }
+}
 
 
 void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims, std::vector<Aidge::DimSize_t> strides) {
diff --git a/src/operator/MetaOperatorDefs/PaddedConv.cpp b/src/operator/MetaOperatorDefs/PaddedConv.cpp
index c70541f8b..31b1c675e 100644
--- a/src/operator/MetaOperatorDefs/PaddedConv.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedConv.cpp
@@ -33,15 +33,22 @@ std::shared_ptr<Aidge::Node> Aidge::PaddedConv(Aidge::DimSize_t in_channels,
                                                const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
                                                bool no_bias)
 {
-    auto metaOp = std::make_shared<Node>(PaddedConv_Op<DIM>(kernel_dims, stride_dims, padding_dims, dilation_dims), name);
-    if (!name.empty()) {
-        std::static_pointer_cast<MetaOperator_Op>(metaOp->getOperator())->getMicroGraph()->setNodesName();
-    }
-    addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
+    // auto metaOp = PaddedConv_Op<DIM>(kernel_dims, stride_dims, padding_dims, dilation_dims);
+    // if (!name.empty()) {
+    //     metaOp->getMicroGraph()->setName(name);
+    //     metaOp->getMicroGraph()->setNodesName();
+    // }
+    // auto metaOpNode = std::make_shared<Node>(metaOp, name);
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
+        std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "")
+    });
+    auto metaOpNode = MetaOperator("PaddedConv", graph, name);
+    addProducer(metaOpNode, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
     if (!no_bias) {
-        addProducer(metaOp, 2, {out_channels}, "b");
+        addProducer(metaOpNode, 2, {out_channels}, "b");
     }
-    return metaOp;
+    return metaOpNode;
 }
 template std::shared_ptr<Aidge::Node> Aidge::PaddedConv<1>(const Aidge::DimSize_t, const Aidge::DimSize_t, const std::array<Aidge::DimSize_t,1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&, bool);
 template std::shared_ptr<Aidge::Node> Aidge::PaddedConv<2>(const Aidge::DimSize_t, const Aidge::DimSize_t, const std::array<Aidge::DimSize_t,2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
diff --git a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
index 0f686f22a..1c073b78a 100644
--- a/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
+++ b/src/operator/MetaOperatorDefs/PaddedConvDepthWise.cpp
@@ -32,15 +32,20 @@ std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise(const Aidge::DimSize_t nb_channels,
                                                         const std::array<Aidge::DimSize_t, DIM> &dilation_dims,
                                                         bool no_bias)
 {
-    auto metaOp = std::make_shared<Node>(PaddedConvDepthWise_Op<DIM>(kernel_dims, stride_dims, padding_dims, dilation_dims), name);
-    if (!name.empty()) {
-        std::static_pointer_cast<MetaOperator_Op>(metaOp->getOperator())->getMicroGraph()->setNodesName();
-    }
-    addProducer(metaOp, 1, append(nb_channels, append(Aidge::DimSize_t(1), kernel_dims)), "w");
+    // auto metaOp = std::make_shared<Node>(PaddedConvDepthWise_Op<DIM>(kernel_dims, stride_dims, padding_dims, dilation_dims), name);
+    // if (!name.empty()) {
+    //     std::static_pointer_cast<MetaOperator_Op>(metaOp->getOperator())->getMicroGraph()->setNodesName();
+    // }
+    auto graph = Sequential({
+        Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
+        std::make_shared<Node>(std::make_shared<ConvDepthWise_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv_depth_wise" : "")
+    });
+    auto metaOpNode = MetaOperator("PaddedConvDepthWise", graph, name);
+    addProducer(metaOpNode, 1, append(nb_channels, append(Aidge::DimSize_t(1), kernel_dims)), "w");
     if (!no_bias) {
-        addProducer(metaOp, 2, {nb_channels}, "b");
+        addProducer(metaOpNode, 2, {nb_channels}, "b");
     }
-    return metaOp;
+    return metaOpNode;
 }
 template std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise<1>(const Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 1>&, const std::string&, const std::array<Aidge::DimSize_t, 1>&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 1>&, bool);
 template std::shared_ptr<Aidge::Node> Aidge::PaddedConvDepthWise<2>(const Aidge::DimSize_t, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 4>&, const std::array<Aidge::DimSize_t, 2>&, bool);
--
GitLab