diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml index a27b3c0f3c8a836722f73fe5d61a370f3b61f3f1..da0d23c9de978ebcdbb370a6f4a92262829e05b9 100644 --- a/.gitlab/ci/build.gitlab-ci.yml +++ b/.gitlab/ci/build.gitlab-ci.yml @@ -38,15 +38,25 @@ build:windows_cpp: tags: - windows - image: mcr.microsoft.com/windows/servercore:ltsc2022 + image: buildtools before_script: - - Invoke-WebRequest -UseBasicParsing "https://github.com/Kitware/CMake/releases/download/v3.27.4/cmake-3.27.4-windows-x86_64.msi" -o cmake-3.27.4-windows-x86_64.msi - - Start-Process -Wait -FilePath MsiExec.exe -ArgumentList '/i cmake-3.27.4-windows-x86_64.msi ADD_CMAKE_TO_PATH=System /qn' - - Invoke-WebRequest -UseBasicParsing "https://aka.ms/vs/17/release/vs_buildtools.exe" -o vs_buildtools.exe - - Start-Process -Wait -FilePath .\vs_buildtools.exe -ArgumentList '-q' + # Install Chocolatey + - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) + # Install dependencies + - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y + - choco install git -Y + - choco install python -Y + # Update PATH + - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User") script: - mkdir -p build_cpp - mkdir -p install_cpp - cd build_cpp - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug .. - - make -j2 all install + - cmake --build . -j2 + - cmake --install . --config Debug + + artifacts: + paths: + - build_cpp/ + - install_cpp/ diff --git a/.gitlab/ci/test.gitlab-ci.yml b/.gitlab/ci/test.gitlab-ci.yml index 25ddfca3dadd420464f4037a8c99dc1e3122f80a..1e67ce273abc7d6b02f9e3148264ff3f9ea1cf07 100644 --- a/.gitlab/ci/test.gitlab-ci.yml +++ b/.gitlab/ci/test.gitlab-ci.yml @@ -25,3 +25,23 @@ test:ubuntu_python: artifacts: reports: junit: aidge_core/xmlrunner-results.xml + +test:windows_cpp: + stage: test + needs: ["build:windows_cpp"] + tags: + - windows + image: buildtools + before_script: + # Install Chocolatey + - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) + # Install dependencies + - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y + # Update PATH + - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User") + script: + - cd build_cpp + - ctest --output-junit ctest-results.xml --output-on-failure + artifacts: + reports: + junit: build_cpp/ctest-results.xml diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp index e04912c637d5339600c1708a7fb3b68c3ddb494c..091bdbc0f0681352d6983b231c3a68a50a2be716 100644 --- a/include/aidge/aidge.hpp +++ b/include/aidge/aidge.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_IMPORTS_H__ -#define __AIDGE_IMPORTS_H__ +#ifndef AIDGE_IMPORTS_H_ +#define AIDGE_IMPORTS_H_ #include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/TensorImpl.hpp" @@ -51,4 +51,4 @@ //#include "aidge/utilsParsing/AstNode.hpp" //#include 
"aidge/utilsParsing/ParsingToken.hpp" -#endif /* __AIDGE_IMPORTS_H__ */ +#endif /* AIDGE_IMPORTS_H_ */ diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp index 7e022145d1eeaa8a2bd79afe69ca06ca57a62651..5aa2829e16f612b0867ab69feccb829ba2095e1b 100644 --- a/include/aidge/backend/OperatorImpl.hpp +++ b/include/aidge/backend/OperatorImpl.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_OPERATORIMPL_H__ -#define __AIDGE_OPERATORIMPL_H__ +#ifndef AIDGE_OPERATORIMPL_H_ +#define AIDGE_OPERATORIMPL_H_ #include <cstddef> #include <vector> @@ -57,4 +57,4 @@ public: }; } // namespace Aidge -#endif /* __AIDGE_OPERATORIMPL_H__ */ +#endif /* AIDGE_OPERATORIMPL_H_ */ diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp index 66e388f5315447e92fa6497ed5f08550e2cb6d89..b54d8b5d7cebdde1a938090f779fdd61663b5014 100644 --- a/include/aidge/backend/TensorImpl.hpp +++ b/include/aidge/backend/TensorImpl.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_TENSORIMPL_H__ -#define __AIDGE_TENSORIMPL_H__ +#ifndef AIDGE_TENSORIMPL_H_ +#define AIDGE_TENSORIMPL_H_ #include <cstddef> #include <cstdio> @@ -26,7 +26,7 @@ public: virtual void setRawPtr(void* /*ptr*/) { printf("Cannot set raw pointer for backend %s\n", mBackend); - }; + }; virtual std::size_t scalarSize() const = 0; // Size of one scalar (in bytes) constexpr const char *backend() const { return mBackend; } virtual ~TensorImpl() = default; @@ -38,4 +38,4 @@ private: } // namespace Aidge -#endif /* __AIDGE_TENSORIMPL_H__ */ +#endif /* AIDGE_TENSORIMPL_H_ */ diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp index 4edc4b9a5a9fd877cf9a3e84c7f644be2a11534a..81b7810a8a548df7e5a2829b1a31cbe337491382 100644 --- a/include/aidge/data/Data.hpp +++ b/include/aidge/data/Data.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_DATA_H__ -#define __AIDGE_DATA_H__ +#ifndef AIDGE_DATA_H_ +#define AIDGE_DATA_H_ #include "aidge/utils/Parameter.hpp" @@ -66,10 +66,10 @@ template <> const Aidge::DataType NativeType<int>::type = Aidge::DataType::Int32 template <> const char* const EnumStrings<Aidge::DataType>::data[] - = {"Float64", "Float32", "Float16", "BFloat16", "Binary", "Ternary", - "Int2", "Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16", - "Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6", + = {"Float64", "Float32", "Float16", "BFloat16", "Binary", "Ternary", + "Int2", "Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16", + "Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6", "UInt7", "UInt8", "UInt16", "UInt32", "UInt64"}; } -#endif /* __AIDGE_DATA_H__ */ \ No newline at end of file +#endif /* AIDGE_DATA_H_ */ \ No newline at end of file diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp index 01e2a5a51d86c28d3a89bd9085c60bfad297623f..c3a6e478f8943253a9f9b3565db2d4452a9ca133 100644 --- a/include/aidge/data/Tensor.hpp +++ b/include/aidge/data/Tensor.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_DATA_TENSOR_H__ -#define __AIDGE_CORE_DATA_TENSOR_H__ +#ifndef AIDGE_CORE_DATA_TENSOR_H_ +#define AIDGE_CORE_DATA_TENSOR_H_ #include <cstring> #include <set> @@ -156,10 +156,10 @@ class Tensor : public Data, * @param dataType Sets the type of 
inserted data. */ Tensor(DataType dataType = DataType::Float32) - : Data(Type), - mDataType(dataType), - mDims({}), - mSize(0), + : Data(Type), + mDataType(dataType), + mDims({}), + mSize(0), mSizeM1(0) { // ctor @@ -167,14 +167,14 @@ class Tensor : public Data, /** * @brief Construct a new Tensor object copied from another one. - * @param otherTensor + * @param otherTensor */ Tensor(const Tensor& otherTensor) - : Data(Type), - mDataType(otherTensor.mDataType), - mDims(otherTensor.mDims), - mSize(otherTensor.mSize), - mSizeM1(otherTensor.mSizeM1) + : Data(Type), + mDataType(otherTensor.mDataType), + mDims(otherTensor.mDims), + mSize(otherTensor.mSize), + mSizeM1(otherTensor.mSizeM1) { if (otherTensor.hasImpl()) { mImpl = Registrar<Tensor>::create({otherTensor.mImpl->backend(), dataType()})(*this); @@ -312,7 +312,7 @@ class Tensor : public Data, /** * @brief Assess data type, dimensions, backend and data are the same. - * @param otherTensor + * @param otherTensor */ bool operator==(const Tensor &otherTensor) const { if ((!mImpl && !otherTensor.mImpl) || (dataType() != otherTensor.dataType()) || @@ -325,7 +325,7 @@ class Tensor : public Data, /** * @brief Set the backend of the Tensor associated implementation * @details Create and initialized an implementation if non was associated. - * @param name + * @param name */ inline void setBackend(const std::string &name) { if (mImpl) { @@ -342,7 +342,7 @@ class Tensor : public Data, /** * @brief Get a list of available backends. - * @return std::set<std::string> + * @return std::set<std::string> */ static std::set<std::string> getAvailableBackends(){ std::set<std::string> backendsList; @@ -353,7 +353,7 @@ class Tensor : public Data, /** * @brief Get the data type enum. - * @return constexpr DataType + * @return constexpr DataType */ constexpr DataType dataType() const { return mDataType; } @@ -376,27 +376,27 @@ class Tensor : public Data, /** * @brief Get the Impl object - * @return constexpr const std::unique_ptr<TensorImpl>& + * @return constexpr const std::unique_ptr<TensorImpl>& */ constexpr const std::unique_ptr<TensorImpl> &getImpl() { return mImpl; } /** * @brief Return if an implementaiton has been associated. - * @return true - * @return false + * @return true + * @return false */ bool hasImpl() const { return (mImpl) ? true : false; } /** * @brief Get number of dimensions of the Tensor. - * @return std::size_t + * @return std::size_t */ inline std::size_t nbDims() const { return mDims.size(); } /** * @brief Get dimensions of the Tensor object. * @tparam DIM number of dimensions. - * @return constexpr std::array<DimSize_t, DIM> + * @return constexpr std::array<DimSize_t, DIM> */ template <DimIdx_t DIM> constexpr std::array<DimSize_t, DIM> dims() const { @@ -406,26 +406,26 @@ class Tensor : public Data, /** * @brief Get dimensions of the Tensor object. - * @return constexpr const std::vector<DimSize_t>& + * @return constexpr const std::vector<DimSize_t>& */ constexpr const std::vector<DimSize_t> &dims() const { return mDims; } /** * @brief Get the number of elements in the Tensor object. - * @return constexpr std::size_t + * @return constexpr std::size_t */ constexpr std::size_t size() const { return mSize; } /** * @brief Get the number of elements in the N-1 dimensions of the Tensor object. - * @return constexpr std::size_t + * @return constexpr std::size_t */ constexpr std::size_t sizeM1() const { return mSizeM1; } /** * @brief Change the shape of the Tensor object according to the given argument. * @tparam DIM new dimensions. 
- * @param dims + * @param dims */ template <std::array<DimSize_t, 1>::size_type DIM> // deducing std::array size_type and declaring DIM accordingly void resize(const std::array<DimSize_t, DIM> &dims) { @@ -441,8 +441,8 @@ class Tensor : public Data, /** * @brief Return if the Tensor object has at leastone element. - * @return true - * @return false + * @return true + * @return false */ bool empty() const { return mDims.empty(); } @@ -540,8 +540,8 @@ class Tensor : public Data, } } } - - + + res += "}"; return res; } @@ -575,10 +575,10 @@ private: mSizeM1 = std::accumulate(++mDims.begin(),mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>()); mSize = static_cast<std::size_t>(mSizeM1 * mDims[0]); } - + return mSize; } }; } // namespace Aidge -#endif /* __AIDGE_CORE_DATA_TENSOR_H__ */ +#endif /* AIDGE_CORE_DATA_TENSOR_H_ */ diff --git a/include/aidge/graph/Connector.hpp b/include/aidge/graph/Connector.hpp index c5dde5c97c61d3661c1ee9cebe7cc17080950eb9..599ca7d6defd729b6e6536dcc95f326d345701d9 100644 --- a/include/aidge/graph/Connector.hpp +++ b/include/aidge/graph/Connector.hpp @@ -8,8 +8,8 @@ * SPDX-License-Identifier: EPL-2.0 * ********************************************************************************/ -#ifndef __AIDGE_CORE_GRAPH_CONNECTOR_H__ -#define __AIDGE_CORE_GRAPH_CONNECTOR_H__ +#ifndef AIDGE_CORE_GRAPH_CONNECTOR_H_ +#define AIDGE_CORE_GRAPH_CONNECTOR_H_ #include <cassert> #include <memory> @@ -18,7 +18,7 @@ #include "aidge/utils/Types.h" namespace Aidge { - + class Node; class GraphView; /** @@ -83,4 +83,4 @@ class Connector { std::shared_ptr<GraphView> generateGraph(std::vector<Connector> ctors); } // namespace Aidge -#endif /* __AIDGE_CORE_GRAPH_CONNECTOR_H__ */ \ No newline at end of file +#endif /* AIDGE_CORE_GRAPH_CONNECTOR_H_ */ \ No newline at end of file diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp index e5fa35354968963859d0b4cbbc01139cbc309250..718eddeaf6a5d08c9dab4898f5a57c0192dcb80b 100644 --- a/include/aidge/graph/GraphView.hpp +++ b/include/aidge/graph/GraphView.hpp @@ -10,8 +10,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_GRAPH_GRAPHVIEW_H__ -#define __AIDGE_CORE_GRAPH_GRAPHVIEW_H__ +#ifndef AIDGE_CORE_GRAPH_GRAPHVIEW_H_ +#define AIDGE_CORE_GRAPH_GRAPHVIEW_H_ #include <map> #include <memory> @@ -33,14 +33,14 @@ enum class DataType; class GraphView : public std::enable_shared_from_this<GraphView> { private: /// @brief Name of the graphview - std::string mName; + std::string mName; /// @brief Set of nodes included in the GraphView - std::set<NodePtr> mNodes; + std::set<NodePtr> mNodes; /// @brief Set of nodes included in the graphview with names std::map<std::string, NodePtr> mNodeRegistry; - + /// @brief Nodes without input link std::set<NodePtr> mInputNodes; @@ -49,23 +49,23 @@ private: public: GraphView(std::string name="") - : mName(name) + : mName(name) { // ctor } // GraphView(std::set<NodePtr> nodes, std::string name="") - // : mName(name) + // : mName(name) // { // add(nodes); // } - bool operator==(const GraphView &gv) const + bool operator==(const GraphView &gv) const { return mNodes == gv.mNodes; } - NodePtr operator[](std::string name) + NodePtr operator[](std::string name) { assert(mNodeRegistry.find(name) != mNodeRegistry.end() && "Could not find Node in the GraphView."); return mNodeRegistry.at(name); @@ -185,7 +185,7 @@ public: /** * @brief Get parents Nodes of the specified Node. * @param nodeName Name of the Node. 
- * @return std::vector<NodePtr> + * @return std::vector<NodePtr> */ std::vector<NodePtr> getParents(const std::string nodeName) const; std::vector<std::vector<NodePtr>> getOrderedParents() const; @@ -206,7 +206,7 @@ public: /** * @brief Get the Nodes pointed to by the GraphView object. - * @return std::set<NodePtr> + * @return std::set<NodePtr> */ inline std::set<NodePtr> getNodes() const { return mNodes; } @@ -233,14 +233,14 @@ public: /** * @brief Include a Node to the current GraphView object. * @param other_Nde Node to add. - * @param includeLearnableParam Include non-data inputs, like weights and biases + * @param includeLearnableParam Include non-data inputs, like weights and biases * in the GraphView automatically. Default: true. */ void add(NodePtr otherNode, bool includeLearnableParam = true); /** * @brief Include a set of Nodes to the current GraphView object. - * @param otherNodes - * @param includeLearnableParam + * @param otherNodes + * @param includeLearnableParam */ void add(std::set<NodePtr> otherNodes, bool includeLearnableParam = true); @@ -326,8 +326,8 @@ public: /** * @brief Replace the current GraphView with the set of given Nodes if possible * @param newNodes Set of Nodes. - * @return true - * @return false + * @return true + * @return false */ bool replaceWith(std::set<NodePtr> newNodes); void updateInputNodes(); @@ -343,13 +343,13 @@ private: /** * @brief Get the sum of the number of dataInput Nodes for all inputNodes of the GraphView object. - * @return IOIndex_t + * @return IOIndex_t */ IOIndex_t getNbDataInputs() const; /** * @brief Get the sum of the number of free dataInput connection for all inputNodes of the GraphView object. - * @return IOIndex_t + * @return IOIndex_t */ IOIndex_t getNbFreeDataInputs() const; @@ -378,4 +378,4 @@ private: }; } // namespace Aidge -#endif /* __AIDGE_CORE_GRAPH_GRAPHVIEW_H__ */ \ No newline at end of file +#endif /* AIDGE_CORE_GRAPH_GRAPHVIEW_H_ */ \ No newline at end of file diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp index 0780ce9a24da0ceb0c42b32944021f5df2fa9726..f056505e6e7839266213ac355cc0e1b93ab98f0d 100644 --- a/include/aidge/graph/Node.hpp +++ b/include/aidge/graph/Node.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_GRAPH_NODE_H__ -#define __AIDGE_CORE_GRAPH_NODE_H__ +#ifndef AIDGE_CORE_GRAPH_NODE_H_ +#define AIDGE_CORE_GRAPH_NODE_H_ #include <cassert> #include <memory> @@ -39,7 +39,7 @@ private: // Compare the content of the weak_ptrs auto sharedA = a.lock(); auto sharedB = b.lock(); - if (!sharedB) return false; // nothing after expired pointer + if (!sharedB) return false; // nothing after expired pointer if (!sharedA) return true; return sharedA < sharedB; // shared_ptr has a valid comparison operator } @@ -78,7 +78,7 @@ public: /** * @brief Functional operator for user-friendly connection interface using an ordered set of Connectors. * @param ctors Ordered Connectors linking their associated Node to the input of the current Node with the same index. - * @return Connector + * @return Connector */ Connector operator()(const std::vector<Connector> &ctors); @@ -165,7 +165,7 @@ public: /** * @brief Set fix value for the specified input by creating a Producer wrapping the given Tensor. - * + * * @param idx Input index. * @param tensor Constant Tensor to add as parent for specified index. */ @@ -301,7 +301,7 @@ public: /** * @brief Get the pointer to parent of the specified input index. 
This pointer is nullptr if no parent is linked. * @param inId Input index. - * @return std::shared_ptr<Node>& + * @return std::shared_ptr<Node>& */ inline NodePtr &getParents(const IOIndex_t inId) { assert(inId != gk_IODefaultIndex); @@ -312,7 +312,7 @@ public: * @brief Unlink the parent Node at the specified input index and return its pointer. * Return a nullptr is no parent was linked. * @param inId Input index. - * @return std::shared_ptr<Node> + * @return std::shared_ptr<Node> */ NodePtr popParent(const IOIndex_t inId); @@ -331,7 +331,7 @@ public: /** * @brief Get the list of children Nodes linked to the output at specified index. * @param outId Output index. - * @return std::vector<std::shared_ptr<Node>> + * @return std::vector<std::shared_ptr<Node>> */ std::vector<NodePtr> getChildren(const IOIndex_t outId) const; @@ -364,8 +364,8 @@ private: /** * @brief Set the idInChildren parameter. - * @param inID - * @param newNodeOutID + * @param inID + * @param newNodeOutID */ void setInputId(const IOIndex_t inID, const IOIndex_t newNodeOutID); @@ -375,17 +375,17 @@ private: /** * @brief Add the given Node as a child for the current Node. - * @param otherNode - * @param outId - * @param otherInId + * @param otherNode + * @param outId + * @param otherInId */ void addChildOp(NodePtr otherNode, const IOIndex_t outId, const IOIndex_t otherInId); /** * @brief Add the given GraphView's input Node as a child for the current Node - * @param otherGraph - * @param outId + * @param otherGraph + * @param outId * @param otherInId pointer the GraphView's input Node and its input index. Defaults to the * only input Node if the GraphView has got one. */ @@ -402,4 +402,4 @@ private: }; } // namespace Aidge -#endif /* __AIDGE_CORE_GRAPH_NODE_H__ */ +#endif /* AIDGE_CORE_GRAPH_NODE_H_ */ diff --git a/include/aidge/graph/OpArgs.hpp b/include/aidge/graph/OpArgs.hpp index dd0cfe1cca8a3f487c18875cff3f90cc56291107..560c3a02c641c29526752dbf352905d0ded32a7e 100644 --- a/include/aidge/graph/OpArgs.hpp +++ b/include/aidge/graph/OpArgs.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_GRAPH_OPARGS_H__ -#define __AIDGE_CORE_GRAPH_OPARGS_H__ +#ifndef AIDGE_CORE_GRAPH_OPARGS_H_ +#define AIDGE_CORE_GRAPH_OPARGS_H_ #include <memory> #include <cassert> @@ -30,7 +30,7 @@ private: public: OpArgs(const std::shared_ptr<GraphView>& view_) : mView(view_) {assert(mView && "The GraphView provided should not be a nullptr.");} - + OpArgs(const std::shared_ptr<Node>& node_) : mNode(node_) {assert(mNode && "The Node provided should not be a nullptr.");} @@ -83,4 +83,4 @@ std::shared_ptr<GraphView> Residual(std::initializer_list<OpArgs> inputs); } -#endif /* __AIDGE_CORE_GRAPH_OPARGS_H__ */ \ No newline at end of file +#endif /* AIDGE_CORE_GRAPH_OPARGS_H_ */ \ No newline at end of file diff --git a/include/aidge/graphmatching/GRegex.hpp b/include/aidge/graphmatching/GRegex.hpp index 1292b607cee35f50dc0acc5f5113946be103065e..fd2d0c52ab47e0f03b3307bdbcfcb5a7b81d78d9 100644 --- a/include/aidge/graphmatching/GRegex.hpp +++ b/include/aidge/graphmatching/GRegex.hpp @@ -10,8 +10,8 @@ ********************************************************************************/ -#ifndef __AIDGE_GREGEX_H__ -#define __AIDGE_GREGEX_H__ +#ifndef AIDGE_GREGEX_H_ +#define AIDGE_GREGEX_H_ #include <stdexcept> // for exception, runtime_error, out_of_range #include <regex> @@ -43,7 +43,7 @@ public: bool walk_validation_all_node_read_validate_by_one_stm(const std::vector<std::vector<SeqStm*>> 
all_stm); bool walk_validation_common_nodes_same_tag_for_all_stm(const std::vector<std::vector<SeqStm*>> all_stm); - + std::set<NodeTmp> get_all_validate_nodes(const std::vector<std::vector<SeqStm*>> all_stm); std::vector<SeqStm*> getStmInit() const { @@ -53,11 +53,11 @@ public: StmFactory getStmFab() const { return mStmFab; } - + //std::set<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>> match(const std::shared_ptr<GraphView> graphToMatch); Match match(const std::shared_ptr<GraphView> graphToMatch); }; } -#endif //__AIDGE_GREGEX_H__ \ No newline at end of file +#endif //AIDGE_GREGEX_H_ \ No newline at end of file diff --git a/include/aidge/graphmatching/Match.hpp b/include/aidge/graphmatching/Match.hpp index 27acc2e8a0880f8c62d0ba995fcde5479bdcb501..fc617a22869fde6531fba67c8641581572cbffc4 100644 --- a/include/aidge/graphmatching/Match.hpp +++ b/include/aidge/graphmatching/Match.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_MATCH_H__ -#define __AIDGE_MATCH_H__ +#ifndef AIDGE_MATCH_H_ +#define AIDGE_MATCH_H_ #include <vector> #include <set> @@ -41,4 +41,4 @@ protected: }; } -#endif //__AIDGE_MATCH_H__ \ No newline at end of file +#endif //AIDGE_MATCH_H_ \ No newline at end of file diff --git a/include/aidge/graphmatching/NodeRegex.hpp b/include/aidge/graphmatching/NodeRegex.hpp index 387bfea46f0147613a116beac1f9c6102ed661e5..10ba7225834e4abfb7f0f5cd45ffa91b22f2f87d 100644 --- a/include/aidge/graphmatching/NodeRegex.hpp +++ b/include/aidge/graphmatching/NodeRegex.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_NODEREGEX_H__ -#define __AIDGE_NODEREGEX_H__ +#ifndef AIDGE_NODEREGEX_H_ +#define AIDGE_NODEREGEX_H_ #include <cstdlib> #include <iostream> #include <cstring> @@ -27,7 +27,7 @@ class NodeRegex NodeRegex(const std::string c){ mCondition = c; }; - + // Version 1 - Only test the type of the node (no need for a lexer) // Input : Node_op // Output : bool @@ -38,4 +38,4 @@ class NodeRegex } -#endif /* ___AIDGE_NODEREGEX_H___ */ \ No newline at end of file +#endif /* _AIDGE_NODEREGEX_H__ */ \ No newline at end of file diff --git a/include/aidge/graphmatching/SeqStm.hpp b/include/aidge/graphmatching/SeqStm.hpp index 6ccd6cfcd322c4d38af2ad04cd2b3a96d839e6cd..0823b5fc0f292d8cf28f7ead53d01bd8dd8adbfe 100755 --- a/include/aidge/graphmatching/SeqStm.hpp +++ b/include/aidge/graphmatching/SeqStm.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_SEQSTM_H__ -#define __AIDGE_SEQSTM_H__ +#ifndef AIDGE_SEQSTM_H_ +#define AIDGE_SEQSTM_H_ #include <iostream> #include <map> @@ -124,4 +124,4 @@ public: }; } // namespace Aidge -#endif /* __AIDGE_SEQSTM_H__ */ \ No newline at end of file +#endif /* AIDGE_SEQSTM_H_ */ \ No newline at end of file diff --git a/include/aidge/graphmatching/StmFactory.hpp b/include/aidge/graphmatching/StmFactory.hpp index 929fdaf3595038f21367768254040c45b291641b..b5850e4a00691ef6c808554a86a6ceec8c38ad19 100644 --- a/include/aidge/graphmatching/StmFactory.hpp +++ b/include/aidge/graphmatching/StmFactory.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_STMFACTORY_H__ -#define __AIDGE_STMFACTORY_H__ +#ifndef AIDGE_STMFACTORY_H_ +#define AIDGE_STMFACTORY_H_ #include <map> #include <utility> @@ -52,4 +52,4 @@ private: }; } -#endif //__AIDGE_STMFACTORY_H__ \ No newline at end of file +#endif 
//AIDGE_STMFACTORY_H_ \ No newline at end of file diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp index 36e592682e61fbc178ed4623f88e9fa5f446f25d..c96b2c571f412124ccdfb83dde685e111448a222 100644 --- a/include/aidge/operator/Add.hpp +++ b/include/aidge/operator/Add.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_ADD_H__ -#define __AIDGE_CORE_OPERATOR_ADD_H__ +#ifndef AIDGE_CORE_OPERATOR_ADD_H_ +#define AIDGE_CORE_OPERATOR_ADD_H_ #include <numeric> #include <vector> @@ -93,14 +93,15 @@ public: assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator."); return *(mInputs[inputIdx].get()); } - inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); } - + inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); } + inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final { assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator."); return mInputs[inputIdx]; } - inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final { assert(outputIdx == 0 && "Add Operators has only 1 outputs"); + (void) outputIdx; // avoid unused warning return mOutput; } @@ -108,8 +109,9 @@ public: assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator."); return std::static_pointer_cast<Data>(mInputs[inputIdx]); } - std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final { assert(outputIdx == 0 && "operator supports only 1 output"); + (void) outputIdx; // avoid unused warning return std::static_pointer_cast<Data>(mOutput); } @@ -144,4 +146,4 @@ inline std::shared_ptr<Node> Add(const char* name = nullptr) { } } -#endif /* __AIDGE_CORE_OPERATOR_ADD_H__ */ +#endif /* AIDGE_CORE_OPERATOR_ADD_H_ */ diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp index a86942d14e531e5974c8924d8dafb8a4d0bebf85..7bf8740877e635cc2e59418bee1c444c7f3884e8 100644 --- a/include/aidge/operator/AvgPooling.hpp +++ b/include/aidge/operator/AvgPooling.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_AVGPOOLING_H__ -#define __AIDGE_CORE_OPERATOR_AVGPOOLING_H__ +#ifndef AIDGE_CORE_OPERATOR_AVGPOOLING_H_ +#define AIDGE_CORE_OPERATOR_AVGPOOLING_H_ #include <array> #include <numeric> @@ -46,7 +46,7 @@ public: AvgPooling_Op() = delete; using Parameterizable_ = Parameterizable<AvgPoolingParam, - std::array<DimSize_t, DIM>, + std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, std::array<DimSize_t, (DIM<<1)> >; template <AvgPoolingParam e> @@ -63,8 +63,9 @@ public: setDatatype(DataType::Float32); } - constexpr void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { + constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { assert(inputIdx < 1 && "operators supports only 3 inputs"); + (void) inputIdx; // avoid unused warning assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type"); mInput = 
std::dynamic_pointer_cast<Tensor>(data); @@ -76,7 +77,7 @@ public: for (std::size_t dim = 0; dim < this->template get<AvgPoolingParam::KernelDims>().size() ; ++dim) { outputDims[dim+2] = 1 + static_cast<DimSize_t>( - std::floor(static_cast<float>(mInput->dims()[dim+2] - + std::floor(static_cast<float>(mInput->dims()[dim+2] - this->template get<AvgPoolingParam::KernelDims>()[dim] + this->template get<AvgPoolingParam::PaddingDims>()[dim] + this->template get<AvgPoolingParam::PaddingDims>()[dim+DIM]) / @@ -91,29 +92,34 @@ public: bool outputDimsForwarded() const override final { return !(mOutput->empty()); } - inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final { + inline Tensor& input(const IOIndex_t inputIdx) const override final { assert(inputIdx == 0 && "operators supports only 1 inputs"); + (void) inputIdx; // avoid unused warning return *(mInput.get()); } - inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); } + inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); } - inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { + inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final { assert(inputIdx == 0 && "AvgPooling Operators supports only 1 inputs"); + (void) inputIdx; // avoid unused warning return mInput; } - inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final { assert(outputIdx == 0 && "AvgPooling Operators has only 1 outputs"); + (void) outputIdx; // avoid unused warning return mOutput; } - std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { + std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final { assert(inputIdx == 0 && "operators supports only 1 inputs"); + (void) inputIdx; // avoid unused warning return std::static_pointer_cast<Data>(mInput); } - std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final { assert(outputIdx == 0 && "operator supports only 1 output"); + (void) outputIdx; // avoid unused warning return std::static_pointer_cast<Data>(mOutput); } @@ -166,4 +172,4 @@ const char *const EnumStrings<Aidge::AvgPoolingParam>::data[] = {"StrideDims", "KernelDims", "PaddingDims"}; } -#endif /* __AIDGE_CORE_OPERATOR_AVGPOOLING_H__ */ +#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */ diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp index 6c64ae44c04f9a8f37d0dde14b251da94ce72a3f..07af5fa8416cf726e209cd9e690af345b321fb0e 100644 --- a/include/aidge/operator/BatchNorm.hpp +++ b/include/aidge/operator/BatchNorm.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_BATCHNORM_H__ -#define __AIDGE_CORE_OPERATOR_BATCHNORM_H__ +#ifndef AIDGE_CORE_OPERATOR_BATCHNORM_H_ +#define AIDGE_CORE_OPERATOR_BATCHNORM_H_ #include <array> #include <memory> @@ -53,7 +53,7 @@ public: Parameterizable_(param<BatchNormParam::Epsilon>(epsilon), param<BatchNormParam::Momentum>(momentum)), mOutput(std::make_shared<Tensor>()) { - setDatatype(DataType::Float32); + 
setDatatype(DataType::Float32); } // Data operator[](const char* inputName) override final { @@ -65,7 +65,7 @@ public: // return *in; // } - constexpr void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { + constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { assert(inputIdx < 5 && "operators supports only 5 inputs"); assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type"); @@ -90,15 +90,16 @@ public: assert(inputIdx < 5 && "operators supports only 5 inputs"); return *(mInputs[inputIdx].get()); } - inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); } + inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); } inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final { assert(inputIdx < 5 && "BatchNorm Operators supports only 5 inputs"); return mInputs[inputIdx]; } - inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final { assert((outputIdx == 0) && "BatchNorm Operator has only 1 output"); + (void) outputIdx; // avoid unused warning return mOutput; } @@ -107,8 +108,9 @@ public: assert(inputIdx < 5 && "operators supports only 5 inputs"); return std::static_pointer_cast<Data>(mInputs[inputIdx]); } - std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final { assert(outputIdx == 0 && "operator supports only 1 output"); + (void) outputIdx; // avoid unused warning return std::static_pointer_cast<Data>(mOutput); } @@ -158,4 +160,4 @@ template <> const char *const EnumStrings<Aidge::BatchNormParam>::data[] = { "Epsilon", "Momentum" }; } -#endif // __AIDGE_CORE_OPERATOR_BATCHNORM_H__ \ No newline at end of file +#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_ \ No newline at end of file diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp index babeac443dd8d51a8b9d3de5a2e96b8745636060..d6efba2cec6908ad58b9feea5e53807c7227cc88 100644 --- a/include/aidge/operator/Conv.hpp +++ b/include/aidge/operator/Conv.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_CONV_H__ -#define __AIDGE_CORE_OPERATOR_CONV_H__ +#ifndef AIDGE_CORE_OPERATOR_CONV_H_ +#define AIDGE_CORE_OPERATOR_CONV_H_ #include <array> #include <cmath> @@ -63,7 +63,7 @@ public: param<ConvParam::KernelDims>(kernel_dims), param<ConvParam::PaddingDims>(padding_dims)), mOutput(std::make_shared<Tensor>()) { - setDatatype(DataType::Float32); + setDatatype(DataType::Float32); } // Data operator[](const char* inputName) override final { @@ -79,7 +79,7 @@ public: // } - constexpr void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { + constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { assert(inputIdx < 3 && "operators supports only 3 inputs"); assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type"); @@ -114,15 +114,16 @@ public: inline Tensor& input(const IOIndex_t inputIdx) const override final { assert(inputIdx < 3 && "operators supports only 3 
inputs"); return *(mInputs[inputIdx].get()); } - inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); } + inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); } inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final { assert(inputIdx < 3 && "Conv Operators supports only 3 inputs"); return mInputs[inputIdx]; } - inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final { assert((outputIdx == 0) && "Conv Operator has only 1 output"); + (void) outputIdx; // avoid unused warning return mOutput; } @@ -131,8 +132,9 @@ public: assert(inputIdx < 3 && "operators supports only 3 inputs"); return std::static_pointer_cast<Data>(mInputs[inputIdx]); } - std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final { assert(outputIdx == 0 && "operator supports only 1 output"); + (void) outputIdx; // avoid unused warning return std::static_pointer_cast<Data>(mOutput); } @@ -161,7 +163,7 @@ public: }; template <std::array<DimSize_t, 1>::size_type DIM> -inline std::shared_ptr<Node> Conv(DimSize_t in_channels, +inline std::shared_ptr<Node> Conv(DimSize_t in_channels, DimSize_t out_channels, const std::array<DimSize_t, DIM> &kernel_dims, const char *name = nullptr, @@ -197,4 +199,4 @@ const char *const EnumStrings<Aidge::ConvParam>::data[] = {"StrideDims", "Dilati "KernelDims", "PaddingDims"}; } -#endif /* __AIDGE_CORE_OPERATOR_CONV_H__ */ +#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */ diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp index 7cbc609798064e993c7744fdf08865d897518a89..a3b7fbf3b21a5b3fd9e532e0cc19cebd46e5d022 100644 --- a/include/aidge/operator/ConvDepthWise.hpp +++ b/include/aidge/operator/ConvDepthWise.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H__ -#define __AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H__ +#ifndef AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ +#define AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ #include <array> #include <cmath> @@ -49,9 +49,9 @@ class ConvDepthWise_Op : public Operator, ConvDepthWise_Op() = delete; using Parameterizable_ = Parameterizable<ConvDepthWiseParam, - std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, - DimSize_t, + std::array<DimSize_t, DIM>, + DimSize_t, std::array<DimSize_t, DIM>, std::array<DimSize_t, (DIM<<1) >>; template <ConvDepthWiseParam e> @@ -62,7 +62,7 @@ class ConvDepthWise_Op : public Operator, const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0), const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) : Operator(Type), - Parameterizable_(param<ConvDepthWiseParam::StrideDims>(stride_dims), + Parameterizable_(param<ConvDepthWiseParam::StrideDims>(stride_dims), param<ConvDepthWiseParam::DilationDims>(dilation_dims), param<ConvDepthWiseParam::Channels>(0), param<ConvDepthWiseParam::KernelDims>(kernel_dims), @@ -71,7 +71,7 @@ class ConvDepthWise_Op : public Operator, setDatatype(DataType::Float32); } - constexpr void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { + 
constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { assert(inputIdx < 3 && "operators supports only 3 inputs"); assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type"); @@ -114,15 +114,16 @@ class ConvDepthWise_Op : public Operator, assert(inputIdx < 3 && "operators supports only 3 inputs"); return *(mInputs[inputIdx].get()); } - inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); } + inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); } inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final { assert(inputIdx < 3 && "ConvDepthWise Operators supports only 3 inputs"); return mInputs[inputIdx]; } - inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final { assert((outputIdx == 0) && "ConvDepthWise Operator has only 1 output"); + (void) outputIdx; // avoid unused warning return mOutput; } @@ -130,9 +131,10 @@ class ConvDepthWise_Op : public Operator, std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final { assert(inputIdx < 3 && "operators supports only 3 inputs"); return std::static_pointer_cast<Data>(mInputs[inputIdx]); - } - std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + } + std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final { assert(outputIdx == 0 && "operator supports only 1 output"); + (void) outputIdx; // avoid unused warning return std::static_pointer_cast<Data>(mOutput); } @@ -193,4 +195,4 @@ const char *const EnumStrings<Aidge::ConvDepthWiseParam>::data[] = {"StrideDims" "KernelDims", "PaddingDims"}; } -#endif /* __AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H__ */ +#endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */ diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp index ebd3a8826dbca292b57f4d3cae749f4e1d7968c8..6e4c54a030c108c29c08a8f5dfdc24d084ccc91c 100644 --- a/include/aidge/operator/FC.hpp +++ b/include/aidge/operator/FC.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_FC_H__ -#define __AIDGE_CORE_OPERATOR_FC_H__ +#ifndef AIDGE_CORE_OPERATOR_FC_H_ +#define AIDGE_CORE_OPERATOR_FC_H_ #include <array> #include <cmath> @@ -57,7 +57,7 @@ public: setDatatype(DataType::Float32); } - void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { + void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { assert(inputIdx < 3 && "operators supports only 3 inputs"); assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type"); if (inputIdx == 2) { @@ -75,7 +75,7 @@ public: std::array<DimSize_t, 2> weightDims = {this->template get<FCParam::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())}; // <out_channels, batch> std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template get<FCParam::OutChannels>()}; - + mInputs[1]->resize(weightDims); mOutput->resize(outputDims); } @@ -89,15 +89,16 @@ public: inline Tensor& input(const IOIndex_t inputIdx) const override final { assert(inputIdx < 3 && "operators supports only 3 inputs"); return *(mInputs[inputIdx].get()); } - 
inline Tensor& output(__attribute__((unused)) const IOIndex_t inputIdx) const override final { return *(mOutput.get()); } + inline Tensor& output(const IOIndex_t /*inputIdx*/) const override final { return *(mOutput.get()); } inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final { assert(inputIdx < 3 && "FC Operators supports only 3 inputs"); return mInputs[inputIdx]; } - inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final { assert((outputIdx == 0) && "FC Operator has only 1 output"); + (void) outputIdx; // avoid unused warning return mOutput; } @@ -106,8 +107,9 @@ public: assert(inputIdx < 3 && "operators supports only 3 inputs"); return std::static_pointer_cast<Data>(mInputs[inputIdx]); } - std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final { assert(outputIdx == 0 && "operator supports only 1 output"); + (void) outputIdx; // avoid unused warning return std::static_pointer_cast<Data>(mOutput); } @@ -152,4 +154,4 @@ const char *const EnumStrings<Aidge::FCParam>::data[] = {"OutChannels", "NoBias"}; } -#endif /* __AIDGE_CORE_OPERATOR_FC_H__ */ \ No newline at end of file +#endif /* AIDGE_CORE_OPERATOR_FC_H_ */ \ No newline at end of file diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp index 86b96bfaa8bf0eb5ab52fa542f169708ff8d09ca..a3e1f02912fb3abdc8adeb09971ee090e875c1fb 100644 --- a/include/aidge/operator/GenericOperator.hpp +++ b/include/aidge/operator/GenericOperator.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_GENERICOPERATOR_H__ -#define __AIDGE_CORE_OPERATOR_GENERICOPERATOR_H__ +#ifndef AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_ +#define AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_ #include <memory> #include <vector> @@ -85,7 +85,7 @@ class GenericOperator_Op std::vector<std::string> getParametersName() { return mParams.getParametersName(); } // Override Virtual Opertor methods - void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, __attribute__((unused)) std::shared_ptr<Data> data) override final { + void associateInput(const IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final { printf("Info: using associateInput() on a GenericOperator.\n"); } @@ -163,4 +163,4 @@ inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbDataI } } // namespace Aidge -#endif /* __AIDGE_CORE_OPERATOR_GENERICOPERATOR_H__ */ +#endif /* AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_ */ diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp index ed967001a23a6b9dd4cfe5db09ec4f1edd60e5ea..64587d51de784082da455eb64aa5bbe175773b5d 100644 --- a/include/aidge/operator/LeakyReLU.hpp +++ b/include/aidge/operator/LeakyReLU.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_LEAKYRELU_H__ -#define __AIDGE_CORE_OPERATOR_LEAKYRELU_H__ +#ifndef AIDGE_CORE_OPERATOR_LEAKYRELU_H_ +#define AIDGE_CORE_OPERATOR_LEAKYRELU_H_ #include <vector> #include <memory> @@ -53,8 +53,9 @@ public: setDatatype(DataType::Float32); } - void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> 
data) override final { + void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { assert(inputIdx == 0 && "operator supports only 1 input"); + (void) inputIdx; // avoid unused warning assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type"); mInput = std::dynamic_pointer_cast<Tensor>(data); } @@ -69,26 +70,30 @@ public: } - inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final { return *(mInput.get()); } - inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); } + inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); } + inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); } - inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { + inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final { assert((inputIdx == 0) && "LeakyReLU Operator has only 1 input"); + (void) inputIdx; // avoid unused warning return mInput; } - inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final { assert((outputIdx == 0) && "LeakyReLU Operator has only 1 output"); + (void) outputIdx; // avoid unused warning return mOutput; } - std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { + std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final { assert(inputIdx == 0 && "operator supports only 1 input"); + (void) inputIdx; // avoid unused warning return std::static_pointer_cast<Data>(mInput); } - std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final { assert(outputIdx == 0 && "operator supports only 1 output"); + (void) outputIdx; // avoid unused warning return mOutput; } @@ -124,4 +129,4 @@ const char* const EnumStrings<Aidge::LeakyReLUParam>::data[] = {"NegativeSlope"}; } -#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */ +#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */ diff --git a/include/aidge/operator/Matmul.hpp b/include/aidge/operator/Matmul.hpp index a871fe516c95802fdb67e81ca3f58fb3be4dce25..b44e8a9b9540e287ff35af1c9642c8202fd096d0 100644 --- a/include/aidge/operator/Matmul.hpp +++ b/include/aidge/operator/Matmul.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_MATMUL_H__ -#define __AIDGE_CORE_OPERATOR_MATMUL_H__ +#ifndef AIDGE_CORE_OPERATOR_MATMUL_H_ +#define AIDGE_CORE_OPERATOR_MATMUL_H_ #include <array> #include <cmath> @@ -55,7 +55,7 @@ public: setDatatype(DataType::Float32); } - void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { + void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { assert(inputIdx < 2 && "operators supports only 2 inputs"); assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type"); mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data); @@ -67,7 +67,7 @@ public: std::array<DimSize_t, 2> weightDims = {static_cast<DimSize_t>(mInputs[0]->size()), this->template 
get<MatmulParam::OutChannels>()}; // <out_channels, batch> std::array<DimSize_t, 1> outputDims = {this->template get<MatmulParam::OutChannels>()}; - + mInputs[1]->resize(weightDims); mOutput->resize(outputDims); } @@ -81,15 +81,16 @@ public: inline Tensor& input(const IOIndex_t inputIdx) const override final { assert(inputIdx < 2 && "operators supports only 2 inputs"); return *(mInputs[inputIdx].get()); } - inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); } + inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); } inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final { assert(inputIdx < 2 && "MatMul Operators has 2 inputs"); return mInputs[inputIdx]; } - inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final { assert((outputIdx == 0) && "MatMul Operators has 1 output"); + (void) outputIdx; // avoid unused warning return mOutput; } @@ -98,8 +99,9 @@ public: assert(inputIdx < 2 && "operators supports only 2 inputs"); return std::static_pointer_cast<Data>(mInputs[inputIdx]); } - std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final { assert(outputIdx == 0 && "operator supports only 1 output"); + (void) outputIdx; // avoid unused warning return std::static_pointer_cast<Data>(mOutput); } @@ -140,4 +142,4 @@ template <> const char *const EnumStrings<Aidge::MatmulParam>::data[] = {"OutChannels"}; } -#endif /* __AIDGE_CORE_OPERATOR__MATMUL_H__ */ +#endif /* AIDGE_CORE_OPERATOR__MATMUL_H_ */ diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp index 7fa1a20449d055da9cd25e6dc4f987757aca3f4a..35a59b56cbf5c10a78116f81de96a8baddc03ff0 100644 --- a/include/aidge/operator/MetaOperator.hpp +++ b/include/aidge/operator/MetaOperator.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_METAOPERATOR_H__ -#define __AIDGE_CORE_OPERATOR_METAOPERATOR_H__ +#ifndef AIDGE_CORE_OPERATOR_METAOPERATOR_H_ +#define AIDGE_CORE_OPERATOR_METAOPERATOR_H_ #include "aidge/operator/Operator.hpp" @@ -25,4 +25,4 @@ public: }; } -#endif /* MetaOperator_H__ */ +#endif /* MetaOperator_H_ */ diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp index 9f24ce884863776f6856ee03fb4feb089e6323e2..30e1ce2a7f664485077282405ec60ddf49513cb5 100644 --- a/include/aidge/operator/Operator.hpp +++ b/include/aidge/operator/Operator.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_OPERATOR_H__ -#define __AIDGE_CORE_OPERATOR_OPERATOR_H__ +#ifndef AIDGE_CORE_OPERATOR_OPERATOR_H_ +#define AIDGE_CORE_OPERATOR_OPERATOR_H_ #include <memory> #include <string> @@ -58,23 +58,23 @@ public: /** * @brief Minimum amount of data from a specific input for one computation pass. * @param inputIdx Index of the input analysed. - * @return NbElts_t + * @return NbElts_t */ NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const; /** * @brief Amount of data from a specific input actually used in one computation pass. - * + * * @param inputIdx Index of the input analysed. 
- * @return NbElts_t + * @return NbElts_t */ NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const; /** * @brief Amount of data ready to be used on a specific output. - * + * * @param outputIdx Index of the output analysed. - * @return NbElts_t + * @return NbElts_t */ NbElts_t getNbProducedData(const IOIndex_t outputIdx) const; @@ -96,4 +96,4 @@ public: }; } // namespace Aidge -#endif /* __AIDGE_CORE_OPERATOR_OPERATOR_H__ */ +#endif /* AIDGE_CORE_OPERATOR_OPERATOR_H_ */ diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp index 4d5461957826e9ebea4a39bb9a7618604e80797a..1f77400ce8a8ef727ea9e0a7d12477c6519ea2df 100644 --- a/include/aidge/operator/Producer.hpp +++ b/include/aidge/operator/Producer.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_PRODUCER_H__ -#define __AIDGE_CORE_OPERATOR_PRODUCER_H__ +#ifndef AIDGE_CORE_OPERATOR_PRODUCER_H_ +#define AIDGE_CORE_OPERATOR_PRODUCER_H_ #include <array> #include <vector> @@ -51,39 +51,41 @@ public: setDatatype(tensor->dataType()); } - void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, __attribute__((unused)) std::shared_ptr<Data> data) override final { + void associateInput(const IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final { assert(false && "Producer operator takes no input"); } - constexpr void computeOutputDims() override final {} + void computeOutputDims() override final {} - constexpr bool outputDimsForwarded() const override final {return true;} + bool outputDimsForwarded() const override final {return true;} - [[noreturn]] inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final { + [[noreturn]] inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { assert(false); exit(-1); } - inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); } + inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); } - inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { + inline std::shared_ptr<Tensor> getInput(const IOIndex_t /*inputIdx*/) const override final { assert(false && "Producer Operator has no input"); return nullptr; } - inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final { assert((outputIdx == 0) && "Producer Operator has only 1 output"); + (void) outputIdx; // avoid unused warning return mOutput; } - std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { + std::shared_ptr<Data> getRawInput(const IOIndex_t /*inputIdx*/) const override final { assert(false && "Producer operator takes no input"); return nullptr; } - std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final { assert(outputIdx == 0 && "operator supports only 1 output"); + (void) outputIdx; // avoid unused warning return std::static_pointer_cast<Data>(mOutput); } @@ -141,4 +143,4 @@ void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, Dim } } // namespace Aidge -#endif /* __AIDGE_CORE_OPERATOR_PRODUCER_H__ */ \ No newline at end of file 
+#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */ \ No newline at end of file diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp index 93bc9a74091c2893dc7b1f7fcc34c72828f34f27..3ea90462cf2b083a1a61ae39be06471093ec9f9f 100644 --- a/include/aidge/operator/ReLU.hpp +++ b/include/aidge/operator/ReLU.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_RELU_H__ -#define __AIDGE_CORE_OPERATOR_RELU_H__ +#ifndef AIDGE_CORE_OPERATOR_RELU_H_ +#define AIDGE_CORE_OPERATOR_RELU_H_ #include <cassert> #include <memory> @@ -42,8 +42,9 @@ public: setDatatype(DataType::Float32); } - void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { + void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { assert(inputIdx == 0 && "operator supports only 1 input"); + (void) inputIdx; // avoid unused warning assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type"); mInput = std::dynamic_pointer_cast<Tensor>(data); } @@ -58,26 +59,30 @@ public: } - inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final { return *(mInput.get()); } - inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); } + inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); } + inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); } - inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { + inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final { assert((inputIdx == 0) && "ReLU Operator has only 1 input"); + (void) inputIdx; // avoid unused warning return mInput; } - inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final { assert((outputIdx == 0) && "ReLU Operator has only 1 output"); + (void) outputIdx; // avoid unused warning return mOutput; } - std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { + std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final { assert(inputIdx == 0 && "operator supports only 1 input"); + (void) inputIdx; // avoid unused warning return std::static_pointer_cast<Data>(mInput); } - std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final { assert(outputIdx == 0 && "operator supports only 1 output"); + (void) outputIdx; // avoid unused warning return std::static_pointer_cast<Data>(mOutput); } @@ -107,4 +112,4 @@ inline std::shared_ptr<Node> ReLU(const char* name = nullptr) { } } -#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */ +#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */ diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp index 9be2acde8570bdc250054e9bed7a1b0d5c3e52ff..93eb262f703ca7eb385641c77df7ae7e79c00b96 100644 --- a/include/aidge/operator/Softmax.hpp +++ b/include/aidge/operator/Softmax.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_SOFTMAX_H__ 
-#define __AIDGE_CORE_OPERATOR_SOFTMAX_H__ +#ifndef AIDGE_CORE_OPERATOR_SOFTMAX_H_ +#define AIDGE_CORE_OPERATOR_SOFTMAX_H_ #include <cassert> #include <memory> @@ -42,8 +42,9 @@ public: setDatatype(DataType::Float32); } - void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { + void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { assert(inputIdx == 0 && "operator supports only 1 input"); + (void) inputIdx; // avoid unused warning assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type"); mInput = std::dynamic_pointer_cast<Tensor>(data); } @@ -58,26 +59,30 @@ public: } - inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final { return *(mInput.get()); } - inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); } + inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); } + inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); } - inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { + inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final { assert((inputIdx == 0) && "Softmax Operator has only 1 input"); + (void) inputIdx; // avoid unused warning return mInput; } - inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final { assert((outputIdx == 0) && "Softmax Operator has only 1 output"); + (void) outputIdx; // avoid unused warning return mOutput; } - std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { + std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final { assert(inputIdx == 0 && "operator supports only 1 input"); + (void) inputIdx; // avoid unused warning return std::static_pointer_cast<Data>(mInput); } - std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final { + std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final { assert(outputIdx == 0 && "operator supports only 1 output"); + (void) outputIdx; // avoid unused warning return std::static_pointer_cast<Data>(mOutput); } @@ -107,4 +112,4 @@ inline std::shared_ptr<Node> Softmax(const char* name = nullptr) { } } -#endif /* __AIDGE_CORE_OPERATOR_SOFTMAX_H__ */ +#endif /* AIDGE_CORE_OPERATOR_SOFTMAX_H_ */ diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp index 2abe90e111c0997928d270b149a6ab4a460eb3aa..81b3f31662933fe4f59a17cdb0ee42441fb791bc 100644 --- a/include/aidge/scheduler/Scheduler.hpp +++ b/include/aidge/scheduler/Scheduler.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_SCHEDULER_H__ -#define __AIDGE_SCHEDULER_H__ +#ifndef AIDGE_SCHEDULER_H_ +#define AIDGE_SCHEDULER_H_ #include <chrono> #include <memory> @@ -57,9 +57,9 @@ public: private: /** * @brief Set of layers receiving an input from currently processing layers - * + * * @param producers Set of layers ready to run. 
- * @return std::set<std::shared_ptr<Node>> + * @return std::set<std::shared_ptr<Node>> */ std::set<std::shared_ptr<Node>> getConsumers(const std::set<std::shared_ptr<Node>>& producers) const; @@ -68,4 +68,4 @@ private: }; } // namespace Aidge -#endif /* __AIDGE_SCHEDULER_H__ */ \ No newline at end of file +#endif /* AIDGE_SCHEDULER_H_ */ \ No newline at end of file diff --git a/include/aidge/utils/CParameter.hpp b/include/aidge/utils/CParameter.hpp index 64943ff58eae9a06fe50afb1b81deea1b66e90ea..0f4c74ab8bccb7bc134e035a5f12d31d51663e5d 100644 --- a/include/aidge/utils/CParameter.hpp +++ b/include/aidge/utils/CParameter.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CPARAMETER_H__ -#define __AIDGE_CPARAMETER_H__ +#ifndef AIDGE_CPARAMETER_H_ +#define AIDGE_CPARAMETER_H_ #include <assert.h> #include <map> @@ -112,4 +112,4 @@ private: } -#endif /* __AIDGE_CPARAMETER_H__ */ +#endif /* AIDGE_CPARAMETER_H_ */ diff --git a/include/aidge/utils/Parameter.hpp b/include/aidge/utils/Parameter.hpp index 6a8fcca41ff03951eeac80493cd9f86a2ea3586b..b0c6e35950187f17d991cfe5b2c9bd2b09f1e70f 100644 --- a/include/aidge/utils/Parameter.hpp +++ b/include/aidge/utils/Parameter.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_UTILS_PARAMETER_H__ -#define __AIDGE_CORE_UTILS_PARAMETER_H__ +#ifndef AIDGE_CORE_UTILS_PARAMETER_H_ +#define AIDGE_CORE_UTILS_PARAMETER_H_ #ifdef PYBIND #include <pybind11/pybind11.h> @@ -40,23 +40,23 @@ constexpr std::size_t size(T (&)[N]) { return N; } #ifdef PYBIND /* This abstract class allows to avoid binding Parametrizable. * Otherwise we would need to bind every template possible of Parametrizable. -* Every operators can access the methods of this class by inheriting from +* Every operators can access the methods of this class by inheriting from * PyAbstractParametrizable in the binding code. */ -class PyAbstractParametrizable{ +class PyAbstractParametrizable{ public: /* Bindable get function, does not recquire any templating. * This is thanks to py::object which allow the function to * be agnostic from its return type. 
*/ virtual py::object getPy(const char* /*name*/) = 0; -}; +}; #endif template <class PARAM_ENUM, class ...T> class Parameterizable #ifdef PYBIND - : public PyAbstractParametrizable + : public PyAbstractParametrizable #endif { public: @@ -99,7 +99,7 @@ public: constexpr typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() { return std::get<static_cast<std::size_t>(paramEnum)>(mParams); } - + template <PARAM_ENUM paramEnum> constexpr const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() const { return std::get<static_cast<std::size_t>(paramEnum)>(mParams); @@ -194,4 +194,4 @@ private: }; } -#endif /* AIDGE_CORE_UTILS_PARAMETER_H__ */ +#endif /* AIDGE_CORE_UTILS_PARAMETER_H_ */ diff --git a/include/aidge/utils/Recipies.hpp b/include/aidge/utils/Recipies.hpp index d6104c56ce288d260ac78c5eb9d1e83d75ca34c8..4cbf8fd284bef314dbe28b19ebdae05172467bad 100644 --- a/include/aidge/utils/Recipies.hpp +++ b/include/aidge/utils/Recipies.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_UTILS_RECIPIES_H__ -#define __AIDGE_CORE_UTILS_RECIPIES_H__ +#ifndef AIDGE_CORE_UTILS_RECIPIES_H_ +#define AIDGE_CORE_UTILS_RECIPIES_H_ #include "aidge/graph/Node.hpp" #include "aidge/graph/GraphView.hpp" @@ -24,4 +24,4 @@ void removeFlatten(std::set<std::shared_ptr<Node>> nodes); } -#endif /* __AIDGE_CORE_UTILS_RECIPIES_H__ */ \ No newline at end of file +#endif /* AIDGE_CORE_UTILS_RECIPIES_H_ */ \ No newline at end of file diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp index 8348eb98d3f3ab4da0873c8b3f4a476a9f8e1afc..98749c1349bad644dee2c1a8549559939791f71c 100644 --- a/include/aidge/utils/Registrar.hpp +++ b/include/aidge/utils/Registrar.hpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_UTILS_REGISTRAR_H__ -#define __AIDGE_CORE_UTILS_REGISTRAR_H__ +#ifndef AIDGE_CORE_UTILS_REGISTRAR_H_ +#define AIDGE_CORE_UTILS_REGISTRAR_H_ #ifdef PYBIND #include <pybind11/pybind11.h> @@ -68,8 +68,8 @@ struct Registrar { for(auto keyValue : C::registry()) keys.push_back(keyValue.first); return keys; - } + } }; } -#endif // __AIDGE_CORE_UTILS_REGISTRAR_H__ \ No newline at end of file +#endif //AIDGE_CORE_UTILS_REGISTRAR_H_ \ No newline at end of file diff --git a/include/aidge/utils/Types.h b/include/aidge/utils/Types.h index d05c64ead0e147a8d66c7f40dbd978283401683a..d65279f1f4d36498ea7653428332690fc99a5def 100644 --- a/include/aidge/utils/Types.h +++ b/include/aidge/utils/Types.h @@ -10,8 +10,8 @@ ********************************************************************************/ -#ifndef __AIDGE_TYPES_H__ -#define __AIDGE_TYPES_H__ +#ifndef AIDGE_TYPES_H_ +#define AIDGE_TYPES_H_ #include <limits> #include <type_traits> @@ -59,4 +59,4 @@ constexpr IOIndex_t gk_IOMaxIndex = std::numeric_limits<IOIndex_t>::max() - 1; } // namespace Aidge -#endif // __AIDGE_TYPES_H__ \ No newline at end of file +#endif //AIDGE_TYPES_H_ \ No newline at end of file diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp index 3f741946da59a118b023f0204da4f42231c1416d..d6442723ecc79527e8eaa7d3e03a466c085dfa58 100644 --- a/python_binding/data/pybind_Tensor.cpp +++ b/python_binding/data/pybind_Tensor.cpp @@ -98,9 +98,9 @@ void init_Tensor(py::module& m){ .def_buffer([](Tensor& b) -> py::buffer_info { const std::unique_ptr<TensorImpl>& tensorImpl = 
b.getImpl(); - std::vector<ssize_t> dims; - std::vector<ssize_t> strides; - ssize_t stride = tensorImpl->scalarSize(); + std::vector<size_t> dims; + std::vector<size_t> strides; + size_t stride = tensorImpl->scalarSize(); for (unsigned int dim = b.nbDims(); dim > 0; dim--) { dims.push_back(b.dims()[dim-1]); diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp index 53d9f844a6a5bda4961659b8ff7f8b1fcf53b4e7..ad412f5b86d9cf0dee0823736548baeb7c7320a7 100644 --- a/src/graph/GraphView.cpp +++ b/src/graph/GraphView.cpp @@ -28,13 +28,15 @@ Aidge::Connector Aidge::GraphView::operator()( assert((inputNodes().size() == 1U) && "Too many input Nodes for the GraphView, undefined behaviour"); std::shared_ptr<Node> inNode = *inputNodes().begin(); assert((ctors.size() == static_cast<std::size_t>(inNode->nbDataInputs())) && "Wrong number of arguments.\n"); - for (__attribute__((unused)) std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inNode->inputs()) { + for (std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inNode->inputs()) { assert((gk_IODefaultIndex == input.second) && "At least one input connection is not free.\n"); + (void)input; // avoid unused warning } - for (__attribute__((unused)) const Connector &ctor : ctors) { + for (const Connector &ctor : ctors) { assert((ctor.node() != nullptr) && "Input Connector must be associated with a node"); + (void)ctor; // avoid unused warning } IOIndex_t inID = 0; for (const Connector &ctor : ctors) { diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp index 286ed7136a369e63f567b35135f89afcc266e0e1..b3db5befbdc8299114514d8d554d439bffc5eae2 100644 --- a/src/graph/Node.cpp +++ b/src/graph/Node.cpp @@ -35,8 +35,9 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, const char *name) Aidge::Connector Aidge::Node::operator()(const std::vector<Connector> &ctors) { assert((ctors.size() == nbDataInputs()) && "Wrong number of arguments.\n"); - for (__attribute__((unused)) std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inputs()) { + for (std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inputs()) { assert((gk_IODefaultIndex == input.second) && "At least one input connection is not free.\n"); + (void) input; // avoid unused warning } IOIndex_t i = 0; for (const Connector &ctor : ctors) {