From d2820fa231e0349b1b37b978125ea52aaaaaa73e Mon Sep 17 00:00:00 2001 From: NAUD Maxence <maxence.naud@cea.fr> Date: Mon, 21 Oct 2024 02:18:23 +0000 Subject: [PATCH] [WIP] Add more namespaces in Aidge - add namespace Operator for each operator - change Operator class for AbsOperator to avoid confusion with the namespace - move each operator attribute enum inside the corresponding class --- include/aidge/backend/OperatorImpl.hpp | 30 ++++++------ include/aidge/backend/cpu/data/TensorImpl.hpp | 2 +- include/aidge/data/DataProvider.hpp | 24 +++++----- include/aidge/data/Tensor.hpp | 2 +- include/aidge/filler/Filler.hpp | 2 +- include/aidge/graph/Connector.hpp | 2 +- include/aidge/graph/GraphView.hpp | 2 +- include/aidge/graph/Matching.hpp | 4 +- include/aidge/graph/Node.hpp | 12 ++--- .../aidge/graphRegex/GraphFsmInterpreter.hpp | 2 +- .../nodeTester/ConditionalInterpreter.hpp | 2 +- include/aidge/operator/Abs.hpp | 7 ++- include/aidge/operator/Add.hpp | 7 ++- include/aidge/operator/And.hpp | 5 +- include/aidge/operator/ArgMax.hpp | 39 ++++++++-------- include/aidge/operator/AvgPooling.hpp | 39 +++++++++++----- include/aidge/operator/BatchNorm.hpp | 33 +++++++------ include/aidge/operator/BitShift.hpp | 46 ++++++++++--------- include/aidge/operator/Cast.hpp | 23 ++++++---- include/aidge/operator/Concat.hpp | 22 +++++---- include/aidge/operator/ConstantOfShape.hpp | 7 ++- include/aidge/operator/Conv.hpp | 27 +++++++---- include/aidge/operator/ConvDepthWise.hpp | 31 ++++++++----- include/aidge/operator/DepthToSpace.hpp | 23 ++++++---- include/aidge/operator/Div.hpp | 4 +- include/aidge/operator/Erf.hpp | 7 ++- include/aidge/operator/FC.hpp | 5 +- include/aidge/operator/Fold.hpp | 12 +++-- include/aidge/operator/Gather.hpp | 24 ++++++---- include/aidge/operator/GenericOperator.hpp | 7 ++- .../aidge/operator/GlobalAveragePooling.hpp | 4 +- include/aidge/operator/GridSample.hpp | 4 +- include/aidge/operator/ILayerNorm.hpp | 7 ++- include/aidge/operator/Identity.hpp | 7 
++- include/aidge/operator/LeakyReLU.hpp | 23 ++++++---- include/aidge/operator/{Ln.hpp => Log.hpp} | 19 ++++---- include/aidge/operator/MatMul.hpp | 5 +- include/aidge/operator/MaxPooling.hpp | 24 ++++++---- include/aidge/operator/Memorize.hpp | 29 +++++++----- include/aidge/operator/MetaOperator.hpp | 8 +++- include/aidge/operator/MetaOperatorDefs.hpp | 5 +- include/aidge/operator/Move.hpp | 7 ++- include/aidge/operator/Mul.hpp | 4 +- include/aidge/operator/Operator.hpp | 20 ++++---- include/aidge/operator/OperatorTensor.hpp | 11 +++-- include/aidge/operator/Pad.hpp | 44 ++++++++++-------- include/aidge/operator/Pop.hpp | 25 ++++++---- include/aidge/operator/Pow.hpp | 5 +- include/aidge/operator/Producer.hpp | 18 +++++--- include/aidge/operator/ReLU.hpp | 7 ++- include/aidge/operator/ReduceMean.hpp | 24 ++++++---- include/aidge/operator/ReduceSum.hpp | 32 +++++++------ include/aidge/operator/Reshape.hpp | 25 ++++++---- include/aidge/operator/Resize.hpp | 6 ++- include/aidge/operator/Round.hpp | 8 ++-- include/aidge/operator/Scaling.hpp | 25 ++++++---- include/aidge/operator/Shape.hpp | 21 +++++---- include/aidge/operator/ShiftGELU.hpp | 7 ++- include/aidge/operator/ShiftMax.hpp | 7 ++- include/aidge/operator/Sigmoid.hpp | 7 ++- include/aidge/operator/Slice.hpp | 26 ++++++----- include/aidge/operator/Softmax.hpp | 17 ++++--- include/aidge/operator/Split.hpp | 23 ++++++---- include/aidge/operator/Sqrt.hpp | 8 +++- include/aidge/operator/Squeeze.hpp | 6 ++- include/aidge/operator/Sub.hpp | 4 +- include/aidge/operator/Tanh.hpp | 7 ++- include/aidge/operator/Transpose.hpp | 21 +++++---- include/aidge/operator/Unfold.hpp | 25 ++++++---- include/aidge/operator/Unsqueeze.hpp | 37 ++++++++------- include/aidge/recipes/Recipes.hpp | 2 +- include/aidge/scheduler/MemoryManager.hpp | 34 +++++++------- include/aidge/scheduler/ProdConso.hpp | 10 ++-- include/aidge/utils/DynamicAttributes.hpp | 2 +- include/aidge/utils/Random.hpp | 6 +-- include/aidge/utils/StaticAttributes.hpp 
| 2 +- .../backend/pybind_OperatorImpl.cpp | 2 +- python_binding/data/pybind_DataProvider.cpp | 2 +- python_binding/data/pybind_Database.cpp | 2 +- python_binding/data/pybind_TensorImpl.cpp | 2 +- python_binding/filler/pybind_Filler.cpp | 2 +- python_binding/graph/pybind_GraphView.cpp | 4 +- python_binding/graph/pybind_Node.cpp | 8 ++-- python_binding/operator/pybind_And.cpp | 2 +- python_binding/operator/pybind_ArgMax.cpp | 12 ++--- python_binding/operator/pybind_BatchNorm.cpp | 2 +- python_binding/operator/pybind_BitShift.cpp | 8 ++-- python_binding/operator/pybind_Concat.cpp | 2 +- python_binding/operator/pybind_Div.cpp | 2 +- python_binding/operator/pybind_Erf.cpp | 2 +- python_binding/operator/pybind_Gather.cpp | 2 +- .../operator/pybind_GenericOperator.cpp | 2 +- python_binding/operator/pybind_Identity.cpp | 2 +- python_binding/operator/pybind_LeakyReLU.cpp | 2 +- python_binding/operator/pybind_Ln.cpp | 2 +- python_binding/operator/pybind_Memorize.cpp | 2 +- python_binding/operator/pybind_Mul.cpp | 2 +- python_binding/operator/pybind_Operator.cpp | 46 +++++++++---------- .../operator/pybind_OperatorTensor.cpp | 2 +- python_binding/operator/pybind_Pop.cpp | 2 +- python_binding/operator/pybind_Pow.cpp | 2 +- python_binding/operator/pybind_ReLU.cpp | 2 +- python_binding/operator/pybind_ReduceMean.cpp | 12 ++--- python_binding/operator/pybind_ReduceSum.cpp | 12 ++--- python_binding/operator/pybind_Reshape.cpp | 2 +- python_binding/operator/pybind_Resize.cpp | 2 +- python_binding/operator/pybind_Round.cpp | 4 +- python_binding/operator/pybind_Scaling.cpp | 2 +- python_binding/operator/pybind_Shape.cpp | 2 +- python_binding/operator/pybind_Sigmoid.cpp | 2 +- python_binding/operator/pybind_Slice.cpp | 2 +- python_binding/operator/pybind_Softmax.cpp | 2 +- python_binding/operator/pybind_Split.cpp | 2 +- python_binding/operator/pybind_Sqrt.cpp | 2 +- python_binding/operator/pybind_Squeeze.cpp | 4 +- python_binding/operator/pybind_Sub.cpp | 2 +- 
python_binding/operator/pybind_Tanh.cpp | 2 +- python_binding/operator/pybind_Unsqueeze.cpp | 4 +- python_binding/pybind_core.cpp | 2 +- python_binding/recipes/pybind_Recipes.cpp | 4 +- .../scheduler/pybind_MemoryManager.cpp | 4 +- python_binding/scheduler/pybind_ProdConso.cpp | 2 +- python_binding/utils/pybind_Log.cpp | 8 ++-- python_binding/utils/pybind_Random.cpp | 2 +- src/backend/OperatorImpl.cpp | 2 +- src/graph/Node.cpp | 8 ++-- src/graphRegex/matchFsm/FsmGraph.cpp | 4 +- src/nodeTester/ConditionalInterpreter.cpp | 40 ++++++++-------- src/operator/Add.cpp | 2 +- src/operator/AvgPooling.cpp | 2 +- src/operator/BatchNorm.cpp | 2 +- src/operator/Concat.cpp | 2 +- src/operator/DepthToSpace.cpp | 2 +- src/operator/Erf.cpp | 2 +- src/operator/FC.cpp | 2 +- src/operator/Fold.cpp | 2 +- src/operator/Gather.cpp | 2 +- src/operator/GenericOperator.cpp | 2 +- src/operator/GlobalAveragePooling.cpp | 2 +- src/operator/GridSample.cpp | 2 +- src/operator/Identity.cpp | 2 +- src/operator/LeakyReLU.cpp | 2 +- src/operator/Ln.cpp | 2 +- src/operator/MatMul.cpp | 2 +- src/operator/MaxPooling.cpp | 2 +- src/operator/Memorize.cpp | 2 +- src/operator/MetaOperator.cpp | 2 +- src/operator/Move.cpp | 2 +- src/operator/Mul.cpp | 2 +- src/operator/Operator.cpp | 24 +++++----- src/operator/OperatorTensor.cpp | 6 +-- src/operator/Pad.cpp | 2 +- src/operator/Pop.cpp | 6 +-- src/operator/Producer.cpp | 2 +- src/operator/ReLU.cpp | 2 +- src/operator/ReduceMean.cpp | 2 +- src/operator/Reshape.cpp | 2 +- src/operator/Resize.cpp | 2 +- src/operator/Round.cpp | 2 +- src/operator/Scaling.cpp | 2 +- src/operator/Shape.cpp | 2 +- src/operator/ShiftGELU.cpp | 2 +- src/operator/ShiftMax.cpp | 2 +- src/operator/Sigmoid.cpp | 2 +- src/operator/Slice.cpp | 2 +- src/operator/Softmax.cpp | 2 +- src/operator/Split.cpp | 2 +- src/operator/Sqrt.cpp | 2 +- src/operator/Sub.cpp | 2 +- src/operator/Tanh.cpp | 2 +- src/operator/Transpose.cpp | 2 +- src/operator/Unfold.cpp | 2 +- src/scheduler/ProdConso.cpp | 2 
+- src/utils/Log.cpp | 2 +- unit_tests/graphRegex/Test_examples.cpp | 2 +- unit_tests/operator/Test_BitShift_Op.cpp | 4 +- unit_tests/operator/Test_ConvDepthWise_Op.cpp | 2 +- unit_tests/operator/Test_Conv_Op.cpp | 2 +- unit_tests/operator/Test_Operator.cpp | 4 +- unit_tests/recipes/Test_FuseToMetaOps.cpp | 2 +- unit_tests/recipes/Test_MatMulToFC.cpp | 2 +- 181 files changed, 879 insertions(+), 635 deletions(-) rename include/aidge/operator/{Ln.hpp => Log.hpp} (77%) diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp index 4af7da64e..d35ff36d3 100644 --- a/include/aidge/backend/OperatorImpl.hpp +++ b/include/aidge/backend/OperatorImpl.hpp @@ -24,11 +24,11 @@ namespace Aidge { class Node; -class Operator; +class AbsOperator; /** * @brief ImplSpec stores the requirements or the specifications of an implementation. - * + * */ struct ImplSpec { struct IOSpec { @@ -76,23 +76,23 @@ inline bool operator<(const ImplSpec& lhs, const ImplSpec& rhs) { /** * @brief Impl stores the details of a specific implementation. * It is associated to a ImplSpec in a registry. 
- * + * */ template <class FwdFunc, class BwdFunc> struct Impl { - Impl(std::function<std::unique_ptr<ProdConso>(const Operator&)> prodConso_, + Impl(std::function<std::unique_ptr<ProdConso>(const AbsOperator&)> prodConso_, std::function<FwdFunc> forward_, std::function<BwdFunc> backward_ = nullptr): prodConso(prodConso_), forward(forward_), backward(backward_) {} - std::function<std::unique_ptr<ProdConso>(const Operator&)> prodConso; + std::function<std::unique_ptr<ProdConso>(const AbsOperator&)> prodConso; std::function<FwdFunc> forward; std::function<BwdFunc> backward; }; class OperatorImpl { public: - OperatorImpl(const Operator& op, const std::string& backend = ""); + OperatorImpl(const AbsOperator& op, const std::string& backend = ""); virtual void forward(); virtual void backward(); virtual std::shared_ptr<ProdConso> prodConso(); @@ -101,14 +101,14 @@ public: return mBackend; } - const Operator& getOperator() const noexcept { + const AbsOperator& getOperator() const noexcept { return mOp; } /** * @brief Get the operator required implementation specification, according * to the current operator configuration. - * + * */ ImplSpec getRequiredSpec() const; @@ -116,15 +116,15 @@ public: * @brief Get the best implementation that matches \p requiredSpecs. * If no implementation matches \p requiredSpecs, \p requiredSpecs is * returned. - * + * */ ImplSpec getBestMatch(const ImplSpec& requiredSpecs) const; /** - * @brief Get an adapted meta operator corresponding to the required + * @brief Get an adapted meta operator corresponding to the required * specifications \p requiredSpecs from the implementation specifications * \p spec. 
- * + * * @param spec Implementation specification * @param requiredSpecs Required specifications * @return std::shared_ptr<Node> Adapted meta op or nullptr @@ -132,12 +132,12 @@ public: std::shared_ptr<Node> getAdaptation(const ImplSpec& spec, const ImplSpec& requiredSpecs) const; /** - * @brief Get the best adapted meta operator corresponding to the required + * @brief Get the best adapted meta operator corresponding to the required * specifications \p requiredSpecs. * The best adaptation is the one with the lowest overhead cost. - * Currently, it is the one requiring the least number of additionnal + * Currently, it is the one requiring the least number of additionnal * operators to match the available implementations. - * + * * @param requiredSpecs Required specifications * @return std::shared_ptr<Node> Adapted meta op or nullptr */ @@ -150,7 +150,7 @@ protected: virtual std::set<ImplSpec> getAvailableImplSpecs() const; bool checkIOSpec(const ImplSpec::IOSpec& required, const ImplSpec::IOSpec& spec) const; - const Operator &mOp; + const AbsOperator &mOp; const std::string mBackend; std::shared_ptr<ProdConso> mProdConso; }; diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp index 6454ed233..243a5d2f5 100644 --- a/include/aidge/backend/cpu/data/TensorImpl.hpp +++ b/include/aidge/backend/cpu/data/TensorImpl.hpp @@ -143,6 +143,6 @@ static Registrar<Tensor> registrarTensorImpl_cpu_UInt16( static Registrar<Tensor> registrarTensorImpl_cpu_UInt8( {"cpu", DataType::UInt8}, Aidge::TensorImpl_cpu<uint8_t>::create); } // namespace -} // namespace Aidge +} // namespace Aidge #endif /* AIDGE_CPU_DATA_TENSORIMPL_H_ */ diff --git a/include/aidge/data/DataProvider.hpp b/include/aidge/data/DataProvider.hpp index 6c19b5355..7f9830f4d 100644 --- a/include/aidge/data/DataProvider.hpp +++ b/include/aidge/data/DataProvider.hpp @@ -31,7 +31,7 @@ class DataProvider { private: // Dataset providing the data to the dataProvider const 
Database& mDatabase; - + // Desired size of the produced batches const std::size_t mBatchSize; @@ -50,7 +50,7 @@ private: // mNbItems contains the number of items in the database std::size_t mNbItems; // mBatches contains the call order of each database item - std::vector<unsigned int> mBatches; + std::vector<unsigned int> mBatches; // mIndex browsing the number of batch std::size_t mIndexBatch; @@ -62,7 +62,7 @@ private: // Store each modality dimensions, backend and type std::vector<std::vector<std::size_t>> mDataDims; std::vector<std::string> mDataBackends; - std::vector<DataType> mDataTypes; + std::vector<DataType> mDataTypes; public: /** @@ -81,8 +81,8 @@ public: /** * @brief Get the Number of Batch. - * - * @return std::size_t + * + * @return std::size_t */ inline std::size_t getNbBatch(){ return mNbBatch; @@ -90,8 +90,8 @@ public: /** * @brief Get the current Index Batch. - * - * @return std::size_t + * + * @return std::size_t */ inline std::size_t getIndexBatch(){ return mIndexBatch; @@ -118,7 +118,7 @@ public: /** * @brief End condition of dataProvider for one pass on the database. 
- * + * * @return true when all batch were fetched, False otherwise */ inline bool done(){ @@ -129,15 +129,15 @@ public: // Functions for python iterator iter and next (definition in pybind.cpp) /** * @brief __iter__ method for iterator protocol - * - * @return DataProvider* + * + * @return DataProvider* */ DataProvider* iter(); /** * @brief __next__ method for iterator protocol - * - * @return std::vector<std::shared_ptr<Aidge::Tensor>> + * + * @return std::vector<std::shared_ptr<Aidge::Tensor>> */ std::vector<std::shared_ptr<Aidge::Tensor>> next(); }; diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp index 58e893ca5..3cd108e2e 100644 --- a/include/aidge/data/Tensor.hpp +++ b/include/aidge/data/Tensor.hpp @@ -891,6 +891,6 @@ private: mSize = std::accumulate(mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>()); } }; -} // namespace Aidge +} // namespace Aidge #endif /* AIDGE_CORE_DATA_TENSOR_H_ */ diff --git a/include/aidge/filler/Filler.hpp b/include/aidge/filler/Filler.hpp index fe39771b6..d06d78790 100644 --- a/include/aidge/filler/Filler.hpp +++ b/include/aidge/filler/Filler.hpp @@ -45,6 +45,6 @@ template <typename T> void heFiller(std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm = VarianceNorm::FanIn, T meanNorm = 0.0, T scaling = 1.0); -} // namespace Aidge +} // namespace Aidge #endif /* AIDGE_CORE_FILLER_FILLER_H_ */ diff --git a/include/aidge/graph/Connector.hpp b/include/aidge/graph/Connector.hpp index 599ca7d6d..87090e2dc 100644 --- a/include/aidge/graph/Connector.hpp +++ b/include/aidge/graph/Connector.hpp @@ -81,6 +81,6 @@ class Connector { * @return std::shared_ptr<GraphView> */ std::shared_ptr<GraphView> generateGraph(std::vector<Connector> ctors); -} // namespace Aidge +} // namespace Aidge #endif /* AIDGE_CORE_GRAPH_CONNECTOR_H_ */ \ No newline at end of file diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp index efdb06c4a..1c2f68dac 100644 --- 
a/include/aidge/graph/GraphView.hpp +++ b/include/aidge/graph/GraphView.hpp @@ -595,6 +595,6 @@ private: * @return GraphView GraphView containing all nodes with a path to node. */ std::shared_ptr<GraphView> getConnectedGraphView(std::shared_ptr<Node> node); -} // namespace Aidge +} // namespace Aidge #endif /* AIDGE_CORE_GRAPH_GRAPHVIEW_H_ */ diff --git a/include/aidge/graph/Matching.hpp b/include/aidge/graph/Matching.hpp index 951aa6b29..e8e21e89d 100644 --- a/include/aidge/graph/Matching.hpp +++ b/include/aidge/graph/Matching.hpp @@ -141,7 +141,7 @@ public: /** * @brief Same as match() but with a mandatory start node. - * + * * @param startNode Mandatory start node for the query. * @param query The query to search. * @return MatchingResult MatchingResult struct, with empty graph if query @@ -229,6 +229,6 @@ inline bool operator<(const Aidge::SinglePassGraphMatching::MatchingResult& lhs, // Matches rootNode are garanteed to be different! return lhs.graph->rootNode() < rhs.graph->rootNode(); } -} // namespace Aidge +} // namespace Aidge #endif /* AIDGE_CORE_GRAPH_MATCHING_H_ */ diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp index e014b041f..fdde7dda7 100644 --- a/include/aidge/graph/Node.hpp +++ b/include/aidge/graph/Node.hpp @@ -57,7 +57,7 @@ private: std::shared_ptr<DynamicAttributes> mAttrs; std::set<std::weak_ptr<GraphView>, weakCompare> mViews; /** Set of pointers to GraphView instances including this Node instance. */ - const std::shared_ptr<Operator> mOperator; // Pointer to the associated Operator + const std::shared_ptr<AbsOperator> mOperator; // Pointer to the associated Operator std::vector<NodePtr> mParents; /** List of parent node for each input (Parent --> Node --> Child) */ std::vector<std::vector<std::weak_ptr<Node>>> mChildren; /** List of children nodes for each output (Parent --> Node --> Child) */ @@ -75,15 +75,15 @@ public: * @param op Operator giving the Node its number of connections. 
* @param attrs Attributes for the Node. */ - Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttributes> attrs); - Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs); + Node(std::shared_ptr<AbsOperator> op, std::shared_ptr<DynamicAttributes> attrs); + Node(std::shared_ptr<AbsOperator> op, const DynamicAttributes& attrs); /** * @brief Construct a new Node object associated with the input Operator. * @param op Operator giving the Node its number of connections. * @param name (optional) name for the Node. */ - Node(std::shared_ptr<Operator> op, const std::string& name = ""); + Node(std::shared_ptr<AbsOperator> op, const std::string& name = ""); virtual ~Node(); @@ -172,7 +172,7 @@ public: * @brief Get the Operator object of the Node. * @return std::shared_ptr<Operator> */ - inline std::shared_ptr<Operator> getOperator() const { return (*mOperator)(mAttrs); } + inline std::shared_ptr<AbsOperator> getOperator() const { return (*mOperator)(mAttrs); } /////////////////////////////////////////////////////// // TENSOR MANAGEMENT @@ -498,7 +498,7 @@ private: /////////////////////////////////////////////////////// // cannot change operator for now - // void setOperator(const std::shared_ptr<Operator> op_ptr); + // void setOperator(const std::shared_ptr<AbsOperator> op_ptr); /////////////////////////////////////////////////////// // TENSOR MANAGEMENT diff --git a/include/aidge/graphRegex/GraphFsmInterpreter.hpp b/include/aidge/graphRegex/GraphFsmInterpreter.hpp index e2fd43b9e..5ac37ae6d 100644 --- a/include/aidge/graphRegex/GraphFsmInterpreter.hpp +++ b/include/aidge/graphRegex/GraphFsmInterpreter.hpp @@ -27,7 +27,7 @@ namespace Aidge { std::shared_ptr<FsmGraph> interpret(void); - + private: diff --git a/include/aidge/nodeTester/ConditionalInterpreter.hpp b/include/aidge/nodeTester/ConditionalInterpreter.hpp index af6a3b920..fc01e9f2a 100644 --- a/include/aidge/nodeTester/ConditionalInterpreter.hpp +++ 
b/include/aidge/nodeTester/ConditionalInterpreter.hpp @@ -130,7 +130,7 @@ class ConditionalRegisterFunction { errorMessage << "bad Number of argument: get " << args.size() << " need " << sizeof...(ParamsIdx) << "\n"; throw std::runtime_error(errorMessage.str()); } - //we used std::vector< std::shared_ptr<ConditionalData>> as a fifo + //we used std::vector< std::shared_ptr<ConditionalData>> as a fifo std::size_t offset = args.size()-sizeof...(ParamsIdx); using FuncTraits = function_traits<decltype(f)>; diff --git a/include/aidge/operator/Abs.hpp b/include/aidge/operator/Abs.hpp index f1dc37003..4075e8614 100644 --- a/include/aidge/operator/Abs.hpp +++ b/include/aidge/operator/Abs.hpp @@ -23,6 +23,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { class Abs_Op : public OperatorTensor, public Registrable<Abs_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Abs_Op&)>> { @@ -49,7 +50,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::Abs_Op */ - std::shared_ptr<Operator> clone() const override { + std::shared_ptr<AbsOperator> clone() const override { return std::make_shared<Abs_Op>(*this); } @@ -67,6 +68,8 @@ public: inline std::shared_ptr<Node> Abs(const std::string& name = "") { return std::make_shared<Node>(std::make_shared<Abs_Op>(), name); } -} + +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_ABS_H_ */ diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp index f96996079..61f64a18a 100644 --- a/include/aidge/operator/Add.hpp +++ b/include/aidge/operator/Add.hpp @@ -23,6 +23,7 @@ #include "aidge/utils/Registrar.hpp" namespace Aidge { +namespace Operator { class Add_Op : public OperatorTensor, public Registrable<Add_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Add_Op&)>> { @@ -41,7 +42,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::Add_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; // Data operator[](const char* inputName) override final { // std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] : @@ -67,6 +68,8 @@ public: }; std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = ""); -} + +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_ADD_H_ */ diff --git a/include/aidge/operator/And.hpp b/include/aidge/operator/And.hpp index e4f04e2fa..8f55b6f5c 100644 --- a/include/aidge/operator/And.hpp +++ b/include/aidge/operator/And.hpp @@ -23,6 +23,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { /** * @brief Tensor element-wise logical and operation. @@ -57,7 +58,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::And_Op */ - std::shared_ptr<Operator> clone() const override { + std::shared_ptr<AbsOperator> clone() const override { return std::make_shared<And_Op>(*this); } @@ -77,6 +78,8 @@ public: inline std::shared_ptr<Node> And(const std::string& name = "") { return std::make_shared<Node>(std::make_shared<And_Op>(), name); } + +} // namespace Operator } // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_AND_H_ */ diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp index 13f63ce98..8e5c861e6 100644 --- a/include/aidge/operator/ArgMax.hpp +++ b/include/aidge/operator/ArgMax.hpp @@ -26,23 +26,25 @@ #include "aidge/utils/Types.h" namespace Aidge { -enum class ArgMaxAttr { Axis, KeepDims, SelectLastIndex }; +namespace Operator { /** * @brief This operator has as purpose to reduce given dimension by replacing with the Max value's index. 
*/ class ArgMax_Op : public OperatorTensor, public Registrable<ArgMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ArgMax_Op &)>> { +public: +enum class mAttr { Axis, KeepDims, SelectLastIndex }; public: static const std::string Type; private: - using Attributes_ = StaticAttributes<ArgMaxAttr, + using Attributes_ = StaticAttributes<mAttr, std::int32_t, bool, bool>; - template <ArgMaxAttr e> + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; @@ -52,17 +54,17 @@ public: /** * @brief constructor for ArgMax op * @param[in] axis around which perform the operation - * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axis and + * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axis and * if false we remove the dimension completely - * @param[in] select_last_index in case we have many maximum, if true the last index is returned - * if false the first index is returned. + * @param[in] select_last_index in case we have many maximum, if true the last index is returned + * if false the first index is returned. */ ArgMax_Op(std::int32_t axis, bool keep_dims, bool select_last_index) : OperatorTensor(Type, {InputCategory::Data}, 1), mAttributes(std::make_shared<Attributes_>( - attr<ArgMaxAttr::Axis>(axis), - attr<ArgMaxAttr::KeepDims>(keep_dims), - attr<ArgMaxAttr::SelectLastIndex>(select_last_index))) + attr<mAttr::Axis>(axis), + attr<mAttr::KeepDims>(keep_dims), + attr<mAttr::SelectLastIndex>(select_last_index))) {} /** @@ -84,7 +86,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::ArgMax_Op */ - std::shared_ptr<Operator> clone() const override { + std::shared_ptr<AbsOperator> clone() const override { return std::make_shared<ArgMax_Op>(*this); } @@ -94,9 +96,9 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::Axis>(); } - inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::KeepDims>(); } - inline bool& selectLastIndex() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::SelectLastIndex>(); } + inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<mAttr::Axis>(); } + inline bool& keepDims() const noexcept { return mAttributes -> getAttr<mAttr::KeepDims>(); } + inline bool& selectLastIndex() const noexcept { return mAttributes -> getAttr<mAttr::SelectLastIndex>(); } static const std::vector<std::string> getInputsName() { @@ -114,23 +116,24 @@ public: * @param axis Dimension over which data max should be computed. * @param keep_dims Whether or not reduced dimensions are to be erased. * @param select_last_index Whether to select the last index of max elements in case there are many maximums. - * By default the first max element index is + * By default the first max element index is * @param name Name of the Operator. * @return std::shared_ptr<Node> Node containing the Operator. 
*/ inline std::shared_ptr<Node> ArgMax(std::int32_t axis=0, bool keep_dims=true, bool select_last_index=false, - const std::string& name = "") { + const std::string& name = "") +{ return std::make_shared<Node>(std::make_shared<ArgMax_Op>(axis, keep_dims, select_last_index), name); - } -} // namespace Aidge +} // namespace Operator +} // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"}; +const char *const EnumStrings<Aidge::Operator::ArgMax_Op::mAttr>::data[] = {"axis", "keep_dims", "select_last_index"}; } #endif /* AIDGE_CORE_OPERATOR_ARGMAX_H_ */ diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp index 54b40907e..a96240e38 100644 --- a/include/aidge/operator/AvgPooling.hpp +++ b/include/aidge/operator/AvgPooling.hpp @@ -24,20 +24,23 @@ #include "aidge/utils/Types.h" namespace Aidge { -enum class AvgPoolingAttr { StrideDims, KernelDims }; +namespace Operator { + template <DimIdx_t DIM> class AvgPooling_Op : public OperatorTensor, public Registrable<AvgPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>> { +public: +enum class mAttr { StrideDims, KernelDims }; public: static const std::string Type; private: - using Attributes_ = StaticAttributes<AvgPoolingAttr, + using Attributes_ = StaticAttributes<mAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>>; - template <AvgPoolingAttr e> + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; @@ -50,8 +53,8 @@ public: const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) : OperatorTensor(Type, {InputCategory::Data}, 1), mAttributes(std::make_shared<Attributes_>( - attr<AvgPoolingAttr::StrideDims>(stride_dims), - attr<AvgPoolingAttr::KernelDims>(kernel_dims))) + attr<mAttr::StrideDims>(stride_dims), + attr<mAttr::KernelDims>(kernel_dims))) 
{} /** @@ -64,7 +67,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::AvgPooling_Op */ - std::shared_ptr<Operator> clone() const override final; + std::shared_ptr<AbsOperator> clone() const override final; bool forwardDims(bool /*allowDataDependency*/ = false) override final; @@ -80,8 +83,8 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<AvgPoolingAttr::StrideDims>(); } - inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<AvgPoolingAttr::KernelDims>(); } + inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<mAttr::StrideDims>(); } + inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<mAttr::KernelDims>(); } static const std::vector<std::string> getInputsName() { return {"data_input"}; @@ -105,7 +108,7 @@ inline std::shared_ptr<Node> AvgPooling( static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported"); return AvgPooling(to_array(kernel_dims), name, stride_dims); } -} // namespace Aidge +} // namespace Aidge extern template class Aidge::AvgPooling_Op<1>; extern template class Aidge::AvgPooling_Op<2>; @@ -114,10 +117,24 @@ extern template class Aidge::AvgPooling_Op<4>; namespace { template <> -const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = { +const char *const EnumStrings<Aidge::Operator::AvgPooling_Op<1>::mAttr>::data[] = { "stride_dims", "kernel_dims" }; -} +const char *const EnumStrings<Aidge::Operator::AvgPooling_Op<2>::mAttr>::data[] = { + "stride_dims", + "kernel_dims" +}; +const char *const EnumStrings<Aidge::Operator::AvgPooling_Op<3>::mAttr>::data[] = { + "stride_dims", + "kernel_dims" +}; +const char *const 
EnumStrings<Aidge::Operator::AvgPooling_Op<4>::mAttr>::data[] = { + "stride_dims", + "kernel_dims" +}; + +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */ diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp index cdac7935f..566893cd4 100644 --- a/include/aidge/operator/BatchNorm.hpp +++ b/include/aidge/operator/BatchNorm.hpp @@ -23,18 +23,21 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { -enum class BatchNormAttr { Epsilon, Momentum }; template <DimIdx_t DIM> class BatchNorm_Op : public OperatorTensor, public Registrable<BatchNorm_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>> { +public: + enum class mAttr { Epsilon, Momentum }; + public: static const std::string Type; private: - using Attributes_ = StaticAttributes<BatchNormAttr, float, float>; - template <BatchNormAttr e> + using Attributes_ = StaticAttributes<mAttr, float, float>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; @@ -51,8 +54,8 @@ public: InputCategory::Param}, 1), mAttributes(std::make_shared<Attributes_>( - attr<BatchNormAttr::Epsilon>(epsilon), - attr<BatchNormAttr::Momentum>(momentum))) {} + attr<mAttr::Epsilon>(epsilon), + attr<mAttr::Momentum>(momentum))) {} /** * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). @@ -64,7 +67,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::BatchNorm_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; // Data operator[](const char* inputName) override final { // std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? 
mInputs[0] : @@ -82,8 +85,8 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline float& epsilon() const { return mAttributes->template getAttr<BatchNormAttr::Epsilon>(); } - inline float& momentum() const { return mAttributes->template getAttr<BatchNormAttr::Momentum>(); } + inline float& epsilon() const { return mAttributes->template getAttr<mAttr::Epsilon>(); } + inline float& momentum() const { return mAttributes->template getAttr<mAttr::Momentum>(); } static const std::vector<std::string> getInputsName() { return {"data_input", "scale", "shift", "mean", "variance"}; @@ -102,15 +105,19 @@ std::shared_ptr<Node> BatchNorm(const DimSize_t nbFeatures, const float epsilon = 1.0e-5F, const float momentum = 0.1F, const std::string& name = ""); -} // namespace Aidge -extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t, const float, const float, const std::string&); -extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const std::string&); -extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const std::string&); +} // namespace Operator +} // namespace Aidge + +extern template std::shared_ptr<Aidge::Node> Aidge::Operator::BatchNorm<2>(const DimSize_t, const float, const float, const std::string&); +extern template std::shared_ptr<Aidge::Node> Aidge::Operator::BatchNorm<3>(const DimSize_t, const float, const float, const std::string&); +extern template std::shared_ptr<Aidge::Node> Aidge::Operator::BatchNorm<4>(const DimSize_t, const float, const float, const std::string&); namespace { template <> -const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum" }; +const char *const EnumStrings<Aidge::Operator::BatchNorm_Op<2>::mAttr>::data[] = { "epsilon", "momentum" }; +const char *const 
EnumStrings<Aidge::Operator::BatchNorm_Op<3>::mAttr>::data[] = { "epsilon", "momentum" }; +const char *const EnumStrings<Aidge::Operator::BatchNorm_Op<4>::mAttr>::data[] = { "epsilon", "momentum" }; } #endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_ diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp index bd14bea76..27c5faea0 100644 --- a/include/aidge/operator/BitShift.hpp +++ b/include/aidge/operator/BitShift.hpp @@ -23,9 +23,8 @@ #include "aidge/utils/Types.h" #include "aidge/utils/StaticAttributes.hpp" - namespace Aidge { - enum class BitShiftAttr { BitShiftdirection }; +namespace Operator { /** * @brief Tensor BitShift Operator @@ -33,19 +32,20 @@ namespace Aidge { class BitShift_Op : public OperatorTensor, public Registrable<BitShift_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const BitShift_Op&)>> { public: + enum class mAttr { BitShiftdirection }; enum BitShiftDirection {left,right}; static const std::string Type; -private: +private: - using Attributes_ = StaticAttributes<BitShiftAttr,BitShiftDirection>; - template <BitShiftAttr e> using attr = typename Attributes_::template attr<e>; + using Attributes_ = StaticAttributes<mAttr,BitShiftDirection>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; public: - BitShift_Op(BitShiftDirection direction) + BitShift_Op(BitShiftDirection direction) : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1), mAttributes(std::make_shared<Attributes_>( - attr<BitShiftAttr::BitShiftdirection>(direction))) + attr<mAttr::BitShiftdirection>(direction))) {} /**¨PPPP @@ -67,33 +67,33 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::BitShift_Op */ - std::shared_ptr<Operator> clone() const override { + std::shared_ptr<AbsOperator> clone() const override { return std::make_shared<BitShift_Op>(*this); } bool forwardDims(bool allowDataDependency = false) override final; - + /** * @brief Setter to specify which backend to use - * + * * @return Boolean */ void setBackend(const std::string& name, DeviceIdx_t device = 0) override; std::set<std::string> getAvailableBackends() const override; - + /** * @brief Getter to retrieve Attributes of the bitshift class - * + * * @return Attributes */ inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } /** * @brief Retrieve the direction in which the shift should be applied (right or left) - * - * @return BitShiftDirection + * + * @return BitShiftDirection */ - inline BitShiftDirection& direction() const noexcept { return mAttributes ->template getAttr<BitShiftAttr::BitShiftdirection>(); } + inline BitShiftDirection& direction() const noexcept { return mAttributes ->template getAttr<mAttr::BitShiftdirection>(); } static const std::vector<std::string> getInputsName(){ return {"InputTensor", "ShiftAmount"}; @@ -105,20 +105,22 @@ public: }; /** - * @brief The bitwise shift operator performs an element-wise operation between the input tensor and the shift tensor in - the direction specified by "direction" + * @brief The bitwise shift operator performs an element-wise operation between the input tensor and the shift tensor in + the direction specified by "direction" * @param[in] direction Direction of the bitshift (Left or Right) * @param[in] name Name of the node - * @return std::shared_ptr<Node> + * @return std::shared_ptr<Node> */ - inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direction, const std::string& name = "") { - return std::make_shared<Node>(std::make_shared<BitShift_Op>(direction), name); - } +inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection 
direction, const std::string& name = "") { + return std::make_shared<Node>(std::make_shared<BitShift_Op>(direction), name); +} + +} // namespace Operator } // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::BitShiftAttr>::data[] = {"BitShiftdirection"}; +const char *const EnumStrings<Aidge::Operator::BitShift_Op::mAttr>::data[] = {"BitShiftdirection"}; } diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp index 3fa1bb22a..eb2e22314 100644 --- a/include/aidge/operator/Cast.hpp +++ b/include/aidge/operator/Cast.hpp @@ -24,22 +24,26 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace AbsOperator { + class Cast_OpImpl : public OperatorImpl { public: - Cast_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} + Cast_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} void forward() override; }; -enum class CastAttr { TargetType }; class Cast_Op : public OperatorTensor, public Registrable<Cast_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Cast_Op&)>> { +public: +enum class mAttr { TargetType }; + public: static const std::string Type; private: - using Attributes_ = StaticAttributes<CastAttr, DataType>; - template <CastAttr e> + using Attributes_ = StaticAttributes<mAttr, DataType>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; @@ -50,7 +54,7 @@ public: /** * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). - * @param op Operator to copy. + * @param op AbsOperator to copy. */ Cast_Op(const Cast_Op& op) : OperatorTensor(op), @@ -66,9 +70,9 @@ public: /** * @brief Clone the operator using its copy-constructor. 
- * @see Operator::Cast_Op + * @see AbsOperator::Cast_Op */ - std::shared_ptr<Operator> clone() const override { + std::shared_ptr<AbsOperator> clone() const override { return std::make_shared<Cast_Op>(*this); } @@ -76,7 +80,7 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline DataType& targetType() const { return mAttributes->template getAttr<CastAttr::TargetType>(); } + inline DataType& targetType() const { return mAttributes->template getAttr<mAttr::TargetType>(); } static const std::vector<std::string> getInputsName(){ return {"data_input"}; @@ -89,11 +93,12 @@ public: std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name = ""); +} // namespace AbsOperator } // namespace Aidge namespace { template <> -const char* const EnumStrings<Aidge::CastAttr>::data[] = { "target_type" }; +const char* const EnumStrings<Aidge::Operator::Cast_Op::mAttr>::data[] = { "target_type" }; } #endif /* AIDGE_CORE_OPERATOR_CAST_H_ */ diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp index 98835dd2a..a97cef548 100644 --- a/include/aidge/operator/Concat.hpp +++ b/include/aidge/operator/Concat.hpp @@ -26,24 +26,28 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { + class Concat_OpImpl : public OperatorImpl { public: - Concat_OpImpl(const Operator& op, const std::string& backend = "") + Concat_OpImpl(const AbsOperator& op, const std::string& backend = "") : OperatorImpl(op, backend) {} void forward() override; }; -enum class ConcatAttr { Axis }; class Concat_Op : public OperatorTensor, public Registrable<Concat_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Concat_Op&)>> { +public: +enum class mAttr { Axis }; + public: static const std::string Type; private: - using Attributes_ = StaticAttributes<ConcatAttr, std::int32_t>; - template <ConcatAttr e> + using Attributes_ = 
StaticAttributes<mAttr, std::int32_t>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; @@ -62,7 +66,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::Concat_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool forwardDims(bool allowDataDependency = false) override final; @@ -70,7 +74,7 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::int32_t& axis() const { return mAttributes->template getAttr<ConcatAttr::Axis>(); } + inline std::int32_t& axis() const { return mAttributes->template getAttr<mAttr::Axis>(); } static const std::vector<std::string> getInputsName(){ return {"data_input_0", "data_input_n"}; @@ -81,11 +85,13 @@ public: }; std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string& name = ""); -} + +} // namespace Operator +} // namespace Aidge namespace { template <> - const char* const EnumStrings<Aidge::ConcatAttr>::data[] = { + const char* const EnumStrings<Aidge::Operator::Concat_Op::mAttr>::data[] = { "axis" }; } diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp index 18e626544..1f48ee605 100644 --- a/include/aidge/operator/ConstantOfShape.hpp +++ b/include/aidge/operator/ConstantOfShape.hpp @@ -31,6 +31,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { enum class ConstantOfShapeAttr { /** @@ -63,7 +64,7 @@ private: public: /** * @brief constructor for ConstantOfShape_op - * @param[in] value : a scalar tensor which holds the value that will + * @param[in] value : a scalar tensor which holds the value that will * fill the output tensor */ ConstantOfShape_Op(const Tensor &value = Tensor(0.f)) @@ -90,7 +91,7 @@ public: * @brief Clone the operator 
using its copy-constructor. * @see Operator::MatMul_Op */ - std::shared_ptr<Operator> clone() const override final { + std::shared_ptr<AbsOperator> clone() const override final { return std::make_shared<ConstantOfShape_Op>(*this); } @@ -125,6 +126,8 @@ inline std::shared_ptr<Node> ConstantOfShape(const Tensor value = Tensor(0.f), return std::make_shared<Node>(std::make_shared<ConstantOfShape_Op>(value), name); } + +} // namespace Operator } // namespace Aidge namespace { diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp index cd1a57dd9..90d356af7 100644 --- a/include/aidge/operator/Conv.hpp +++ b/include/aidge/operator/Conv.hpp @@ -30,17 +30,21 @@ #include "aidge/utils/Types.h" namespace Aidge { -enum class ConvAttr { StrideDims, DilationDims, KernelDims }; +namespace Operator { + template <DimIdx_t DIM> class Conv_Op : public OperatorTensor, public Registrable<Conv_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>> { +public: + enum class mAttr { StrideDims, DilationDims, KernelDims }; + public: static const std::string Type; private: - using Attributes_ = StaticAttributes<ConvAttr, + using Attributes_ = StaticAttributes<mAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>>; @@ -71,7 +75,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::Conv_Op */ - std::shared_ptr<Operator> clone() const override { + std::shared_ptr<AbsOperator> clone() const override { return std::make_shared<Conv_Op<DIM>>(*this); } @@ -114,7 +118,7 @@ public: } inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvAttr::StrideDims>(); } + inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<mAttr::StrideDims>(); } inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvAttr::DilationDims>(); } inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvAttr::KernelDims>(); } @@ -162,14 +166,21 @@ inline std::shared_ptr<Node> Conv( static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported"); return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias); } -} // namespace Aidge -extern template class Aidge::Conv_Op<1>; -extern template class Aidge::Conv_Op<2>; +} // namespace Operator +} // namespace Aidge + +extern template class Aidge::Operator::Conv_Op<1>; +extern template class Aidge::Operator::Conv_Op<2>; namespace { template <> -const char *const EnumStrings<Aidge::ConvAttr>::data[] = { +const char *const EnumStrings<Aidge::Operator::Conv_Op<1>::mAttr>::data[] = { + "stride_dims", + "dilation_dims", + "kernel_dims" +}; +const char *const EnumStrings<Aidge::Operator::Conv_Op<2>::mAttr>::data[] = { "stride_dims", "dilation_dims", "kernel_dims" diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp index f0a55a299..8f03d4755 100644 --- a/include/aidge/operator/ConvDepthWise.hpp +++ b/include/aidge/operator/ConvDepthWise.hpp @@ -29,20 +29,24 @@ #include "aidge/utils/Types.h" namespace Aidge { -enum class ConvDepthWiseAttr { StrideDims, DilationDims, 
KernelDims }; +namespace Operator { + template <DimIdx_t DIM> class ConvDepthWise_Op : public OperatorTensor, public Registrable<ConvDepthWise_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>> { +public: +enum class mAttr { StrideDims, DilationDims, KernelDims }; + public: static const std::string Type; private: - using Attributes_ = StaticAttributes<ConvDepthWiseAttr, + using Attributes_ = StaticAttributes<mAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>>; - template <ConvDepthWiseAttr e> + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; @@ -55,9 +59,9 @@ public: const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1), mAttributes(std::make_shared<Attributes_>( - attr<ConvDepthWiseAttr::StrideDims>(stride_dims), - attr<ConvDepthWiseAttr::DilationDims>(dilation_dims), - attr<ConvDepthWiseAttr::KernelDims>(kernel_dims))) + attr<mAttr::StrideDims>(stride_dims), + attr<mAttr::DilationDims>(dilation_dims), + attr<mAttr::KernelDims>(kernel_dims))) {} /** @@ -70,7 +74,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::ConvDepthWise_Op */ - std::shared_ptr<Operator> clone() const override { + std::shared_ptr<AbsOperator> clone() const override { return std::make_shared<ConvDepthWise_Op<DIM>>(*this); } @@ -93,9 +97,9 @@ public: } inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>(); } - inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>(); } - inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>(); } + inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<mAttr::StrideDims>(); } + inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<mAttr::DilationDims>(); } + inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<mAttr::KernelDims>(); } static const std::vector<std::string> getInputsName(){ return {"data_input", "weight", "bias"}; @@ -125,14 +129,17 @@ inline std::shared_ptr<Node> ConvDepthWise( static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported"); return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias); } -} // namespace Aidge + +} // namespace Operator +} // namespace Aidge extern template class Aidge::ConvDepthWise_Op<1>; extern template class Aidge::ConvDepthWise_Op<2>; namespace { template <> -const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"stride_dims", "dilation_dims", +const char *const EnumStrings<Aidge::Operator::ConvDepthWise_Op<1>::mAttr>::data[] = {"stride_dims", "dilation_dims", +const char *const EnumStrings<Aidge::Operator::ConvDepthWise_Op<2>::mAttr>::data[] = {"stride_dims", "dilation_dims", "kernel_dims"}; } diff --git 
a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp index 856cd0e85..bf3f155a9 100644 --- a/include/aidge/operator/DepthToSpace.hpp +++ b/include/aidge/operator/DepthToSpace.hpp @@ -23,26 +23,30 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { + class DepthToSpace_OpImpl : public OperatorImpl { public: - DepthToSpace_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} + DepthToSpace_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} void forward() override; }; -enum class DepthToSpaceAttr { BlockSize, Mode }; class DepthToSpace_Op : public OperatorTensor, public Registrable<DepthToSpace_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const DepthToSpace_Op &)>> { +public: +enum class mAttr { BlockSize, Mode }; + public: static const std::string Type; enum class Mode { DCR, CRD }; private: - using Attributes_ = StaticAttributes<DepthToSpaceAttr, std::uint32_t, Mode>; - template <DepthToSpaceAttr e> + using Attributes_ = StaticAttributes<mAttr, std::uint32_t, Mode>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; @@ -63,7 +67,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::DepthToSpace_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool forwardDims(bool /*allowDataDependency*/ = false) override final; @@ -71,8 +75,8 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::uint32_t& blockSize() const { return mAttributes->template getAttr<DepthToSpaceAttr::BlockSize>(); } - inline Mode& mode() const { return mAttributes->template getAttr<DepthToSpaceAttr::Mode>(); } + inline std::uint32_t& blockSize() const { return mAttributes->template getAttr<mAttr::BlockSize>(); } + inline Mode& mode() const { return mAttributes->template getAttr<mAttr::Mode>(); } static const std::vector<std::string> getInputsName() { return {"data_input"}; @@ -86,11 +90,12 @@ std::shared_ptr<Node> DepthToSpace(const std::uint32_t blockSize, const DepthToSpace_Op::Mode mode = DepthToSpace_Op::Mode::CRD, const std::string& name = ""); -} // namespace Aidge +} // namespace Operator +} // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = { "block_size", "mode" }; +const char *const EnumStrings<Aidge::Operator::DepthToSpace_Op::mAttr>::data[] = { "block_size", "mode" }; } #endif //AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_ diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp index 5ed9e789d..3eb0e93a0 100644 --- a/include/aidge/operator/Div.hpp +++ b/include/aidge/operator/Div.hpp @@ -23,6 +23,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { class Div_Op : public OperatorTensor, public Registrable<Div_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Div_Op&)>> { @@ -50,7 +51,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::Div_Op */ - std::shared_ptr<Operator> clone() const override { + std::shared_ptr<AbsOperator> clone() const override { return std::make_shared<Div_Op>(*this); } @@ -69,6 +70,7 @@ public: std::shared_ptr<Node> Div(const std::string& name = ""); +} // namespace Operator } // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_DIV_H_ */ diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp index 88a4bfd29..38426962f 100644 --- a/include/aidge/operator/Erf.hpp +++ b/include/aidge/operator/Erf.hpp @@ -23,6 +23,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { class Erf_Op : public OperatorTensor, public Registrable<Erf_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Erf_Op&)>> { @@ -41,7 +42,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::Erf_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override; std::set<std::string> getAvailableBackends() const override; @@ -55,6 +56,8 @@ public: }; std::shared_ptr<Node> Erf(const std::string& name = ""); -} + +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_ERF_H_ */ diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp index 592ba4e2b..0269e4db6 100644 --- a/include/aidge/operator/FC.hpp +++ b/include/aidge/operator/FC.hpp @@ -24,6 +24,8 @@ #include "aidge/utils/Registrar.hpp" namespace Aidge { +namespace Operator { + class FC_Op : public OperatorTensor, public Registrable<FC_Op, std::string, @@ -53,7 +55,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::FC_Op */ - std::shared_ptr<Operator> clone() const override final; + std::shared_ptr<AbsOperator> clone() const override final; void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final; @@ -86,6 +88,7 @@ public: std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = ""); +} // namespace Operator } // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_FC_H_ */ diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp index 517d63adc..9e0dae969 100644 --- a/include/aidge/operator/Fold.hpp +++ b/include/aidge/operator/Fold.hpp @@ -30,11 +30,13 @@ #include "aidge/utils/Types.h" namespace Aidge { -enum class FoldAttr { OutputDims, StrideDims, DilationDims, KernelDims }; +namespace Operator { template <DimIdx_t DIM> class Fold_Op : public OperatorTensor, public Registrable<Fold_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Fold_Op<DIM> &)>> { +public: +enum class FoldAttr { OutputDims, StrideDims, DilationDims, KernelDims }; public: static const std::string Type; @@ -73,7 +75,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::Fold_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool forwardDims(bool /*allowDataDependency*/ = false) override final; @@ -111,13 +113,15 @@ inline std::shared_ptr<Node> Fold( static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Fold, not supported"); return Fold(to_array(outputDims), to_array(kernelDims), name, strideDims, dilationDims); } -} // namespace Aidge + +} // namespace Operator +} // namespace Aidge extern template class Aidge::Fold_Op<2>; namespace { template <> -const char *const EnumStrings<Aidge::FoldAttr>::data[] = { +const char *const EnumStrings<Aidge::Operator::Fold_Op<2>::mAttr>::data[] = { "output_dims", "stride_dims", "dilation_dims", diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp index 80dcdd678..43bfcdb53 100644 --- a/include/aidge/operator/Gather.hpp +++ b/include/aidge/operator/Gather.hpp @@ -25,27 +25,31 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { + class Gather_OpImpl : public OperatorImpl { public: - Gather_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} + Gather_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} void forward() override; }; -enum class GatherAttr { Axis, Indices, GatheredShape }; class Gather_Op : public OperatorTensor, public Registrable<Gather_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Gather_Op&)>> { +public: +enum class mAttr { Axis, Indices, GatheredShape }; + public: static const std::string Type; - using Attributes_ = StaticAttributes<GatherAttr, + using Attributes_ = StaticAttributes<mAttr, std::int8_t, std::vector<int64_t>, std::vector<DimSize_t>>; private: - template <GatherAttr e> + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; @@ -67,7 +71,7 @@ public: * @brief Clone 
the operator using its copy-constructor. * @see Operator::Gather_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool dimsForwarded() const override final; bool forwardDims(bool allowDataDependency = false) override final; @@ -76,9 +80,9 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::int8_t& axis() const { return mAttributes -> getAttr<GatherAttr::Axis>(); } - inline std::vector<int64_t>& indices() const { return mAttributes -> getAttr<GatherAttr::Indices>(); } - inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes -> getAttr<GatherAttr::GatheredShape>(); } + inline std::int8_t& axis() const { return mAttributes -> getAttr<mAttr::Axis>(); } + inline std::vector<int64_t>& indices() const { return mAttributes -> getAttr<mAttr::Indices>(); } + inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes -> getAttr<mAttr::GatheredShape>(); } static const std::vector<std::string> getInputsName(){ return {"data_input", "indices"}; @@ -89,11 +93,13 @@ public: }; std::shared_ptr<Node> Gather(std::int8_t axis = 0, const std::vector<int64_t>& indices = {}, const std::vector<DimSize_t>& gatheredShape = {}, const std::string& name = ""); + +} // namespace Operator } // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"axis", "indices", "gathered_shape"}; +const char *const EnumStrings<Aidge::Operator::Gather_Op::mAttr>::data[] = {"axis", "indices", "gathered_shape"}; } #endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */ diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp index 89b2c06a5..f5fe30308 100644 --- a/include/aidge/operator/GenericOperator.hpp +++ b/include/aidge/operator/GenericOperator.hpp @@ -24,6 +24,8 @@ namespace Aidge { +namespace Operator { + class 
GenericOperator_Op : public OperatorTensor, public Registrable<GenericOperator_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>> { @@ -51,7 +53,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::GenericOperator_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; public: bool forwardDims(bool allowDataDependency = false) override final; @@ -107,6 +109,7 @@ std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector */ std::shared_ptr<Node> GenericOperator(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut, const std::string& name = ""); -} // namespace Aidge + +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_ */ diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp index ef440e8c6..6c2759269 100644 --- a/include/aidge/operator/GlobalAveragePooling.hpp +++ b/include/aidge/operator/GlobalAveragePooling.hpp @@ -23,6 +23,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { /** * @brief Description for the tensor data structure. 
@@ -41,7 +42,7 @@ public: GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op); - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool forwardDims(bool allowDataDependency = false) override final; @@ -58,6 +59,7 @@ public: std::shared_ptr<Node> GlobalAveragePooling(const std::string &name = ""); +} // namespace Operator } // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_GLOBAL_AVERAGE_POOLING_H_ */ diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp index dc2b2059e..a4384fe3c 100644 --- a/include/aidge/operator/GridSample.hpp +++ b/include/aidge/operator/GridSample.hpp @@ -24,6 +24,7 @@ #include "aidge/utils/StaticAttributes.hpp" namespace Aidge { +namespace Operator { enum class GridSampleAttr { Mode, PaddingMode, AlignCorners }; @@ -53,7 +54,7 @@ public: public: - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool forwardDims(bool /*allowDataDependencies*/ = false) override final; @@ -79,6 +80,7 @@ std::shared_ptr<Node> GridSample( bool alignCorners = false, const std::string& name = ""); +} // namespace Operator } // namespace Aidge diff --git a/include/aidge/operator/ILayerNorm.hpp b/include/aidge/operator/ILayerNorm.hpp index f660cc64e..37b644862 100644 --- a/include/aidge/operator/ILayerNorm.hpp +++ b/include/aidge/operator/ILayerNorm.hpp @@ -26,6 +26,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { class ILayerNorm_Op : public OperatorTensor, public Registrable<ILayerNorm_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ILayerNorm_Op&)>> { @@ -54,7 +55,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::ILayerNorm_Op */ - std::shared_ptr<Operator> clone() const override { + std::shared_ptr<AbsOperator> clone() const override { return std::make_shared<ILayerNorm_Op>(*this); } @@ -76,6 +77,8 @@ public: inline std::shared_ptr<Node> ILayerNorm(const std::string& name = "") { return std::make_shared<Node>(std::make_shared<ILayerNorm_Op>(), name); } -} + +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_ILAYERNORM_H_ */ diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp index 24476f231..079718cf8 100644 --- a/include/aidge/operator/Identity.hpp +++ b/include/aidge/operator/Identity.hpp @@ -26,6 +26,8 @@ #include "aidge/utils/ErrorHandling.hpp" namespace Aidge { +namespace Operator { + class Identity_OpImpl : public OperatorImpl { public: - Identity_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} + Identity_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} @@ -57,7 +59,7 @@ public: * @brief Clone the operator using its copy-constructor.
* @see Operator::Identity_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; std::set<std::string> getAvailableBackends() const override; @@ -72,6 +74,7 @@ public: std::shared_ptr<Node> Identity(const std::string& name = ""); -} +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_IDENTITY_H_ */ diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp index 179eb90b3..97bbdaba8 100644 --- a/include/aidge/operator/LeakyReLU.hpp +++ b/include/aidge/operator/LeakyReLU.hpp @@ -24,18 +24,19 @@ #include "aidge/utils/Types.h" namespace Aidge { -enum class LeakyReLUAttr { - NegativeSlope -}; +namespace Operator { class LeakyReLU_Op : public OperatorTensor, public Registrable<LeakyReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>> { +public: + enum class mAttr { NegativeSlope }; + public: static const std::string Type; private: - using Attributes_ = StaticAttributes<LeakyReLUAttr, float>; - template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>; + using Attributes_ = StaticAttributes<mAttr, float>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; public: @@ -46,7 +47,7 @@ public: : OperatorTensor(Type, {InputCategory::Data}, 1), mAttributes( std::make_shared<Attributes_>( - attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))) + attr<mAttr::NegativeSlope>(negativeSlope))) {} /** @@ -59,13 +60,13 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::LeakyReLU_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override; std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<LeakyReLUAttr::NegativeSlope>(); } + inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<mAttr::NegativeSlope>(); } static const std::vector<std::string> getInputsName(){ return {"data_input"}; @@ -76,11 +77,13 @@ public: }; std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = ""); -} + +} // namespace Operator +} // namespace Aidge namespace { template <> -const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[] +const char* const EnumStrings<Aidge::Operator::LeakyReLU_Op::mAttr>::data[] = {"negative_slope"}; } diff --git a/include/aidge/operator/Ln.hpp b/include/aidge/operator/Log.hpp similarity index 77% rename from include/aidge/operator/Ln.hpp rename to include/aidge/operator/Log.hpp index 22fc51664..6d3023aa6 100755 --- a/include/aidge/operator/Ln.hpp +++ b/include/aidge/operator/Log.hpp @@ -24,25 +24,26 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { -class Ln_Op : public OperatorTensor, - public Registrable<Ln_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Ln_Op&)>> { +class Log_Op : public OperatorTensor, + public Registrable<Log_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Log_Op&)>> { public: static const std::string Type; - Ln_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {} + Log_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {} /** * @brief Copy-constructor. 
Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @param op Operator to copy. */ - Ln_Op(const Ln_Op& op); + Log_Op(const Log_Op& op); /** * @brief Clone the operator using its copy-constructor. - * @see Operator::Ln_Op + * @see Operator::Log_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; @@ -56,7 +57,9 @@ public: } }; -std::shared_ptr<Node> Ln(const std::string& name = ""); -} +std::shared_ptr<Node> Log(const std::string& name = ""); + +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_LN_H_ */ diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp index bf6ab84c7..f6be6d423 100644 --- a/include/aidge/operator/MatMul.hpp +++ b/include/aidge/operator/MatMul.hpp @@ -22,6 +22,7 @@ #include "aidge/utils/Registrar.hpp" namespace Aidge { +namespace Operator { class MatMul_Op : public OperatorTensor, public Registrable<MatMul_Op, @@ -42,7 +43,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::MatMul_Op */ - std::shared_ptr<Operator> clone() const override final; + std::shared_ptr<AbsOperator> clone() const override final; /** * @brief Compute dimensions for the output Tensor following the same rules as @@ -70,6 +71,8 @@ public: }; std::shared_ptr<Node> MatMul(const std::string& name = ""); + +} // namespace Operator } // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_MATMUL_H_ */ diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp index 0cc43a6fb..48d1861dc 100644 --- a/include/aidge/operator/MaxPooling.hpp +++ b/include/aidge/operator/MaxPooling.hpp @@ -29,21 +29,25 @@ #include "aidge/utils/Types.h" namespace Aidge { -enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode }; +namespace Operator { + template <DimIdx_t DIM> class MaxPooling_Op : public OperatorTensor, public Registrable<MaxPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>> { +public: +enum class mAttr { StrideDims, KernelDims, CeilMode }; + public: static const std::string Type; - using Attributes_ = StaticAttributes<MaxPoolingAttr, + using Attributes_ = StaticAttributes<mAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, bool>; private: - template <MaxPoolingAttr e> + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; @@ -64,7 +68,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::MaxPooling_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool forwardDims(bool /*allowDataDependency*/ = false) override final; @@ -72,9 +76,9 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); } - inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<MaxPoolingAttr::KernelDims>(); } - inline bool& ceilMode() const { return mAttributes->template getAttr<MaxPoolingAttr::CeilMode>(); } + inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<mAttr::StrideDims>(); } + inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<mAttr::KernelDims>(); } + inline bool& ceilMode() const { return mAttributes->template getAttr<mAttr::CeilMode>(); } static const std::vector<std::string> getInputsName(){ return {"data_input"}; @@ -104,11 +108,14 @@ inline std::shared_ptr<Node> MaxPooling( static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported"); return MaxPooling(to_array(kernel_dims), name, stride_dims, ceil_mode); } -} // namespace Aidge + +} // namespace Operator +} // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "kernel_dims", "ceil_mode"}; +const char *const EnumStrings<Aidge::Operator::MaxPooling_Op<1>::mAttr>::data[] = {"stride_dims", "kernel_dims", "ceil_mode"}; +template <> const char *const EnumStrings<Aidge::Operator::MaxPooling_Op<2>::mAttr>::data[] = {"stride_dims", "kernel_dims", "ceil_mode"}; } #endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */ diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp index 2b05b5fff..e52a3b893 100644 --- a/include/aidge/operator/Memorize.hpp +++ b/include/aidge/operator/Memorize.hpp @@ -25,9 +25,11 @@ #include
"aidge/utils/Types.h" namespace Aidge { +namespace Operator { + class Memorize_ProdConso : public ProdConso { public: - Memorize_ProdConso(const Operator& op): ProdConso(op) {} + Memorize_ProdConso(const AbsOperator& op): ProdConso(op) {} Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final; Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final; void updateConsummerProducer() override; @@ -35,21 +37,24 @@ public: class Memorize_OpImpl : public OperatorImpl { public: - Memorize_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} + Memorize_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Memorize_ProdConso>(mOp); }; void forward() override; }; -enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep }; class Memorize_Op : public OperatorTensor, public Registrable<Memorize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Memorize_Op&)>> { + +public: +enum class mAttr { ScheduleStep, ForwardStep, EndStep }; + public: static const std::string Type; private: - using Attributes_ = StaticAttributes<MemorizeAttr, std::uint32_t, std::uint32_t, std::uint32_t>; - template <MemorizeAttr e> + using Attributes_ = StaticAttributes<mAttr, std::uint32_t, std::uint32_t, std::uint32_t>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; @@ -69,7 +74,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::Memorize_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; std::set<std::string> getAvailableBackends() const override; @@ -80,9 +85,9 @@ public: void forward() override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::uint32_t& scheduleStep() const { return mAttributes->template getAttr<MemorizeAttr::ScheduleStep>(); } - inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<MemorizeAttr::ForwardStep>(); } - inline std::uint32_t& endStep() const { return mAttributes->template getAttr<MemorizeAttr::EndStep>(); } + inline std::uint32_t& scheduleStep() const { return mAttributes->template getAttr<mAttr::ScheduleStep>(); } + inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<mAttr::ForwardStep>(); } + inline std::uint32_t& endStep() const { return mAttributes->template getAttr<mAttr::EndStep>(); } static const std::vector<std::string> getInputsName(){ return {"data_input", "data_input_init"}; @@ -93,11 +98,13 @@ public: }; std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = ""); -} // namespace Aidge + +} // namespace Operator +} // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = { +const char *const EnumStrings<Aidge::Operator::Memorize_Op::mAttr>::data[] = { "schedule_step", "forward_step", "end_step" diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp index 744dbd132..d4d476f35 100644 --- a/include/aidge/operator/MetaOperator.hpp +++ b/include/aidge/operator/MetaOperator.hpp @@ -27,6 +27,8 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { + class MetaOperator_Op : public OperatorTensor, public Registrable<MetaOperator_Op, std::array<std::string, 
2>, std::function<std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)>> { public: @@ -59,7 +61,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::MetaOperator_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; inline const std::shared_ptr<GraphView>& getMicroGraph() const noexcept { return mGraph; @@ -115,6 +117,8 @@ std::shared_ptr<Node> MetaOperator(const char *type, const std::shared_ptr<GraphView>& graph, const std::vector<InputCategory>& forcedInputsCategory = {}, const std::string& name = ""); -} // namespace Aidge + +} // namespace Operator +} // namespace Aidge #endif /* MetaOperator_H_ */ diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp index 750a808aa..e56f5848f 100644 --- a/include/aidge/operator/MetaOperatorDefs.hpp +++ b/include/aidge/operator/MetaOperatorDefs.hpp @@ -30,7 +30,7 @@ #include "aidge/utils/Types.h" namespace Aidge { - +namespace Operator { template <std::array<DimSize_t, 1>::size_type DIM> extern std::shared_ptr<Node> PaddedConv(DimSize_t in_channels, @@ -165,6 +165,7 @@ std::shared_ptr<Node> LSTM(DimSize_t in_channels, std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length); -} // namespace Aidge +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */ diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp index 49d92cd12..15dfb2da2 100644 --- a/include/aidge/operator/Move.hpp +++ b/include/aidge/operator/Move.hpp @@ -24,9 +24,11 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { + class Move_OpImpl : public OperatorImpl { public: - Move_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} + Move_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} void forward() override; }; @@ -47,7 +49,7 @@ public: * @brief Clone 
the operator using its copy-constructor. * @see Operator::Move_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override; std::set<std::string> getAvailableBackends() const override; @@ -62,6 +64,7 @@ public: std::shared_ptr<Node> Move(const std::string& name = ""); +} // namespace Operator } // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */ diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp index bfe4fcb0d..4997d259a 100644 --- a/include/aidge/operator/Mul.hpp +++ b/include/aidge/operator/Mul.hpp @@ -23,6 +23,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { /** * @brief Tensor element-wise multiplication. @@ -45,7 +46,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::Mul_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool forwardDims(bool allowDataDependency = false) override final; @@ -62,6 +63,7 @@ public: std::shared_ptr<Node> Mul(const std::string& name = ""); +} // namespace Operator } // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_MUL_H_ */ diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp index a799153e1..61e8b18b7 100644 --- a/include/aidge/operator/Operator.hpp +++ b/include/aidge/operator/Operator.hpp @@ -33,7 +33,9 @@ #ifdef PYBIND namespace py = pybind11; #endif + namespace Aidge { +namespace Operator { enum class OperatorType { Data, @@ -47,7 +49,7 @@ enum class InputCategory { OptionalParam }; -class Operator : public std::enable_shared_from_this<Operator> { +class AbsOperator : public std::enable_shared_from_this<AbsOperator> { protected: std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator std::shared_ptr<DynamicAttributes> mInheritedAttrs; @@ -60,8 +62,8 @@ private: std::set<IOIndex_t> mBackEdges; 
public: - Operator() = delete; - Operator(const std::string& type, const std::vector<InputCategory>& inputsCategory, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data) + AbsOperator() = delete; + AbsOperator(const std::string& type, const std::vector<InputCategory>& inputsCategory, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data) : mType(type), mOperatorType(operatorType), mInputsCategory(inputsCategory), @@ -70,8 +72,8 @@ public: // ctor } - Operator(const Operator& op): - std::enable_shared_from_this<Operator>(), + AbsOperator(const AbsOperator& op): + std::enable_shared_from_this<AbsOperator>(), mOperatorType(op.mOperatorType), mInputsCategory(op.mInputsCategory), mNbOut(op.mNbOut), @@ -82,15 +84,15 @@ public: // Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation. // See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion. 
} - std::shared_ptr<Operator> operator()(std::shared_ptr<DynamicAttributes> attrs) { + std::shared_ptr<AbsOperator> operator()(std::shared_ptr<DynamicAttributes> attrs) { mInheritedAttrs = attrs; return shared_from_this(); } - virtual ~Operator() noexcept; + virtual ~AbsOperator() noexcept; public: - virtual std::shared_ptr<Operator> clone() const = 0; + virtual std::shared_ptr<AbsOperator> clone() const = 0; virtual std::shared_ptr<Attributes> attributes() const { return nullptr; }; virtual std::shared_ptr<DynamicAttributes> inheritedAttributes() const { return mInheritedAttrs; }; @@ -239,6 +241,8 @@ public: } #endif }; + +} // namespace Operator } // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_OPERATOR_H_ */ diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp index c8cdd9381..869f1355a 100644 --- a/include/aidge/operator/OperatorTensor.hpp +++ b/include/aidge/operator/OperatorTensor.hpp @@ -21,9 +21,10 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { class Tensor; -class OperatorTensor : public Operator { +class OperatorTensor : public AbsOperator { /* TODO: Add an attribute specifying the type of Data used by the Operator. * The same way ``Type`` attribute specifies the type of Operator. Hence this * attribute could be checked in the forwardDims function to assert Operators @@ -43,7 +44,7 @@ public: /** * @brief Operator tensor constructor. This function is not meant to be called directly but by a derived class constructor * every operator class derive from this class. - * + * * @param[in] type : type of operator (i.e. "Add", "AveragePool",...) * @param[in] inputsCategory : describes the type of each input. * @param[in] nbOut : Number of tensors this operator will output @@ -94,7 +95,7 @@ public: * - TOKEN mode means that forwarddims will only ensure that all inputs and outputs of the graph the node is within are connected. 
* @param[in] allowDataDependency if set to true, this means that this operator output dimensions depends on the dimensions of optionnal parameter tensors. * @return true if dims have been properly forwarded. false otherwise. If set to false, then forwardDims will enter in token mode. - * + * */ virtual bool forwardDims(bool allowDataDependency = false); virtual bool dimsForwarded() const; @@ -108,6 +109,8 @@ public: protected: bool inputsAssociated(bool checkNonEmpty = true) const; }; -} // namespace Aidge + +} // namespace Operator +} // namespace Aidge #endif // AIDGE_CORE_OPERATOR_OPERATORTENSOR_H_ \ No newline at end of file diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp index 2c670bf23..85b298ac5 100644 --- a/include/aidge/operator/Pad.hpp +++ b/include/aidge/operator/Pad.hpp @@ -24,21 +24,25 @@ #include "aidge/utils/Types.h" namespace Aidge { -enum class PadAttr { BeginEndBorders, BorderType, BorderValue }; -enum class PadBorderType { Constant, Edge, Reflect, Wrap }; +namespace Operator { + template <DimIdx_t DIM> class Pad_Op : public OperatorTensor, public Registrable<Pad_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>> { +public: +enum class mAttr { BeginEndBorders, BorderType, BorderValue }; +enum class BorderType { Constant, Edge, Reflect, Wrap }; + public: static const std::string Type; private: - using Attributes_ = StaticAttributes<PadAttr, + using Attributes_ = StaticAttributes<mAttr, std::array<DimSize_t, 2*DIM>, - PadBorderType, + BorderType, double>; - template <PadAttr e> + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; @@ -47,13 +51,13 @@ public: Pad_Op() = delete; constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples, - const PadBorderType &borderType = PadBorderType::Constant, + const BorderType &borderType = BorderType::Constant, double borderValue = 0.0) : OperatorTensor(Type, 
{InputCategory::Data}, 1), mAttributes(std::make_shared<Attributes_>( - attr<PadAttr::BeginEndBorders>(beginEndTuples), - attr<PadAttr::BorderType>(borderType), - attr<PadAttr::BorderValue>(borderValue))) {} + attr<mAttr::BeginEndBorders>(beginEndTuples), + attr<mAttr::BorderType>(borderType), + attr<mAttr::BorderValue>(borderValue))) {} /** * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). @@ -68,7 +72,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::Pad_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool forwardDims(bool /*allowDataDependency*/ = false) override final; @@ -77,9 +81,9 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::array<DimSize_t, 2*DIM>& beginEndBorders() const noexcept { return mAttributes->template getAttr<PadAttr::BeginEndBorders>(); } - inline PadBorderType& borderType() const noexcept { return mAttributes->template getAttr<PadAttr::BorderType>(); } - inline double& borderValue() const noexcept { return mAttributes->template getAttr<PadAttr::BorderValue>(); } + inline std::array<DimSize_t, 2*DIM>& beginEndBorders() const noexcept { return mAttributes->template getAttr<mAttr::BeginEndBorders>(); } + inline BorderType& borderType() const noexcept { return mAttributes->template getAttr<mAttr::BorderType>(); } + inline double& borderValue() const noexcept { return mAttributes->template getAttr<mAttr::BorderValue>(); } static const std::vector<std::string> getInputsName(){ return {"data_input"}; @@ -92,7 +96,7 @@ public: template <std::array<DimSize_t, 1>::size_type DIM> std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples, const std::string& name = "", - const PadBorderType &borderType = 
PadBorderType::Constant, + const BorderType &borderType = BorderType::Constant, double borderValue = 0.0); // helper with C-style array instead of std::array for beginEndTuples to allow automatic template DIM deduction @@ -100,22 +104,26 @@ template <DimSize_t DIM> inline std::shared_ptr<Node> Pad( DimSize_t const (&beginEndTuples)[2*DIM], const std::string& name = "", - const PadBorderType &borderType = PadBorderType::Constant, + const BorderType &borderType = BorderType::Constant, double borderValue = 0.0) { return Pad<DIM>(to_array(beginEndTuples), name, borderType, borderValue); } -} // namespace Aidge + +} // namespace Operator +} // namespace Aidge -extern template class Aidge::Pad_Op<1>; -extern template class Aidge::Pad_Op<2>; +extern template class Aidge::Operator::Pad_Op<1>; +extern template class Aidge::Operator::Pad_Op<2>; namespace { template <> -const char *const EnumStrings<Aidge::PadAttr>::data[] = {"begin_end_borders", "border_type", "border_value"}; +const char *const EnumStrings<Aidge::Operator::Pad_Op<1>::mAttr>::data[] = {"begin_end_borders", "border_type", "border_value"}; +template <> const char *const EnumStrings<Aidge::Operator::Pad_Op<2>::mAttr>::data[] = {"begin_end_borders", "border_type", "border_value"}; template <> -const char *const EnumStrings<Aidge::PadBorderType>::data[] = {"Constant", "Edge", "Reflect", "Wrap"}; +const char *const EnumStrings<Aidge::Operator::Pad_Op<1>::BorderType>::data[] = {"Constant", "Edge", "Reflect", "Wrap"}; +template <> const char *const EnumStrings<Aidge::Operator::Pad_Op<2>::BorderType>::data[] = {"Constant", "Edge", "Reflect", "Wrap"}; } #endif /* AIDGE_CORE_OPERATOR_PAD_H_ */ diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp index d5898b363..33114d4a6 100644 --- a/include/aidge/operator/Pop.hpp +++ b/include/aidge/operator/Pop.hpp @@ -24,29 +24,34 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { + class Pop_ProdConso : public ProdConso { public: - Pop_ProdConso(const Operator& op): ProdConso(op) {} + Pop_ProdConso(const AbsOperator& op): ProdConso(op) {} Elts_t
getNbRequiredData(const IOIndex_t inputIdx) const override; }; class Pop_OpImpl : public OperatorImpl { public: - Pop_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} + Pop_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Pop_ProdConso>(mOp); }; void forward() override; }; -enum class PopAttr { ForwardStep }; class Pop_Op : public OperatorTensor, public Registrable<Pop_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Pop_Op&)>> { + +public: +enum class mAttr { ForwardStep }; + public: static const std::string Type; private: - using Attributes_ = StaticAttributes<PopAttr, std::uint32_t>; - template <PopAttr e> using attr = typename Attributes_::template attr<e>; + using Attributes_ = StaticAttributes<mAttr, std::uint32_t>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; public: @@ -62,7 +67,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::Pop_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; std::set<std::string> getAvailableBackends() const override; @@ -72,7 +77,7 @@ public: void forward() override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<PopAttr::ForwardStep>(); } + inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<mAttr::ForwardStep>(); } static const std::vector<std::string> getInputsName(){ return {"data_input"}; @@ -83,11 +88,13 @@ public: }; std::shared_ptr<Node> Pop(const std::string& name = ""); -} // namespace Aidge + +} // namespace Operator +} // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::PopAttr>::data[] = { +const char *const EnumStrings<Aidge::Operator::Pop_Op::mAttr>::data[] = { "forward_step" }; } diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp index f6762dd33..8ed5a20f8 100644 --- a/include/aidge/operator/Pow.hpp +++ b/include/aidge/operator/Pow.hpp @@ -23,6 +23,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { class Pow_Op : public OperatorTensor, public Registrable<Pow_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pow_Op&)>> { @@ -49,7 +50,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::Pow_Op */ - std::shared_ptr<Operator> clone() const override { + std::shared_ptr<AbsOperator> clone() const override { return std::make_shared<Pow_Op>(*this); } @@ -68,6 +69,8 @@ public: }; std::shared_ptr<Node> Pow(const std::string& name = ""); + +} // namespace Operator } // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_POW_H_ */ diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp index 115ddcb55..db8561982 100644 --- a/include/aidge/operator/Producer.hpp +++ b/include/aidge/operator/Producer.hpp @@ -25,19 +25,21 @@ #include "aidge/utils/Registrar.hpp" namespace Aidge { - -enum class ProdAttr { Constant }; +namespace Operator { class Producer_Op : public OperatorTensor, public Registrable<Producer_Op, std::string, std::function<std::shared_ptr<OperatorImpl>( const Producer_Op &)>> { +public: +enum class mAttr { Constant }; + public: static const std::string Type; private: - using Attributes_ = StaticAttributes<ProdAttr, bool>; - template <ProdAttr e> using attr = typename Attributes_::template attr<e>; + using Attributes_ = StaticAttributes<mAttr, bool>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; public: @@ -75,7 +77,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::Producer_Op(const Producer_Op&) */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void associateInput(const IOIndex_t /*inputIdx*/, const std::shared_ptr<Data>& /*data*/) override final { AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input."); @@ -92,7 +94,7 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline bool& constant() const { return mAttributes->template getAttr<ProdAttr::Constant>(); } + inline bool& constant() const { return mAttributes->template getAttr<mAttr::Constant>(); } static const std::vector<std::string> getInputsName(){ return {}; @@ -132,11 +134,13 @@ template <std::size_t DIM> std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) { return addProducer(otherNode, inputIdx, to_array(dims), extension); } + +} // namespace Operator } // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::ProdAttr>::data[] = { +const char *const EnumStrings<Aidge::Operator::Producer_Op::mAttr>::data[] = { "constant" }; } diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp index 9b264c1d3..cc0eee6e0 100644 --- a/include/aidge/operator/ReLU.hpp +++ b/include/aidge/operator/ReLU.hpp @@ -24,6 +24,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { class ReLU_Op : public OperatorTensor, public Registrable<ReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReLU_Op&)>> { @@ -42,7 +43,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::ReLU_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; @@ -57,6 +58,8 @@ public: }; std::shared_ptr<Node> ReLU(const std::string& name = ""); -} + +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_RELU_H_ */ diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp index 5d5895a8f..86f947d76 100644 --- a/include/aidge/operator/ReduceMean.hpp +++ b/include/aidge/operator/ReduceMean.hpp @@ -26,7 +26,7 @@ #include "aidge/utils/Types.h" namespace Aidge { -enum class ReduceMeanAttr { Axes, KeepDims, NoopWithEmptyAxes }; +namespace Operator { /** * @brief This operator has as purpose to reduce given axes by replacing with the mean value. @@ -34,15 +34,18 @@ enum class ReduceMeanAttr { Axes, KeepDims, NoopWithEmptyAxes }; class ReduceMean_Op : public OperatorTensor, public Registrable<ReduceMean_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>> { +public: +enum class mAttr { Axes, KeepDims, NoopWithEmptyAxes }; + public: static const std::string Type; private: - using Attributes_ = StaticAttributes<ReduceMeanAttr, + using Attributes_ = StaticAttributes<mAttr, std::vector<std::int32_t>, bool, bool>; - template <ReduceMeanAttr e> + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; @@ -52,7 +55,7 @@ public: /** * @brief constructor for ReduceMean op * @param[in] axes around which perform the operation - * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and + * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and * if false we remove the dimension completely * @param[in] noop_with_empty_axes used when no axes are provided, if set to true, the operator does nothing * and if false, 
we reduce on all axes @@ -69,7 +72,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::ReduceMean_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool forwardDims(bool allowDataDependency = false) override final; @@ -77,9 +80,9 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::Axes>(); } - inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::KeepDims>(); } - inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::NoopWithEmptyAxes>(); } + inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<mAttr::Axes>(); } + inline bool& keepDims() const noexcept { return mAttributes -> getAttr<mAttr::KeepDims>(); } + inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<mAttr::NoopWithEmptyAxes>(); } static const std::vector<std::string> getInputsName() { @@ -120,11 +123,12 @@ std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes, bool noop_with_empty_axes=false, const std::string& name = ""); -} // namespace Aidge +} // namespace Operator +} // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"}; +const char *const EnumStrings<Aidge::Operator::ReduceMean_Op::mAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"}; } #endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */ diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp index bae03cb7d..25ec686c7 100644 --- a/include/aidge/operator/ReduceSum.hpp +++ b/include/aidge/operator/ReduceSum.hpp @@ -26,8 +26,7 @@ #include "aidge/utils/Types.h"
namespace Aidge { -enum class ReduceSumAttr { Axes, KeepDims, NoopWithEmptyAxes }; - +namespace Operator { /** * @brief This operator has as purpose to reduce given axes by replacing with the sum value. @@ -35,15 +34,18 @@ enum class ReduceSumAttr { Axes, KeepDims, NoopWithEmptyAxes }; class ReduceSum_Op : public OperatorTensor, public Registrable<ReduceSum_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceSum_Op &)>> { +public: +enum class mAttr { Axes, KeepDims, NoopWithEmptyAxes }; + public: static const std::string Type; private: - using Attributes_ = StaticAttributes<ReduceSumAttr, + using Attributes_ = StaticAttributes<mAttr, std::vector<std::int32_t>, bool, bool>; - template <ReduceSumAttr e> + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; @@ -53,7 +55,7 @@ public: /** * @brief constructor for ReduceSum op * @param[in] axes around which perform the operation - * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and + * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and * if false we remove the dimension completely * @param[in] noop_with_empty_axes used when no axes are provided, if set to true, the operator does nothing * and if false, we reduce on all axes @@ -61,9 +63,9 @@ public: ReduceSum_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes) : OperatorTensor(Type, {InputCategory::Data}, 1), mAttributes(std::make_shared<Attributes_>( - attr<ReduceSumAttr::Axes>(axes), - attr<ReduceSumAttr::KeepDims>(keep_dims), - attr<ReduceSumAttr::NoopWithEmptyAxes>(noop_with_empty_axes))) + attr<mAttr::Axes>(axes), + attr<mAttr::KeepDims>(keep_dims), + attr<mAttr::NoopWithEmptyAxes>(noop_with_empty_axes))) {} /** @@ -85,7 +87,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::ReduceSum_Op */ - std::shared_ptr<Operator> clone() const override { + std::shared_ptr<AbsOperator> clone() const override { return std::make_shared<ReduceSum_Op>(*this); } @@ -95,9 +97,9 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::Axes>(); } - inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::KeepDims>(); } - inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::NoopWithEmptyAxes>(); } + inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<mAttr::Axes>(); } + inline bool& keepDims() const noexcept { return mAttributes -> getAttr<mAttr::KeepDims>(); } + inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<mAttr::NoopWithEmptyAxes>(); } static const std::vector<std::string> getInputsName() { @@ -126,11 +128,13 @@ inline std::shared_ptr<Node> ReduceSum(const std::vector<std::int32_t> &axes={}, return std::make_shared<Node>(std::make_shared<ReduceSum_Op>(axes, keep_dims, noop_with_empty_axes), name); } -} // namespace Aidge + +} // namespace Operator +} // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::ReduceSumAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"}; +const char *const EnumStrings<Aidge::Operator::ReduceSum_Op::mAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"}; } #endif /* AIDGE_CORE_OPERATOR_REDUCESUM_H_ */ diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp index 721b964d3..cc471c15b 100644 --- a/include/aidge/operator/Reshape.hpp +++ b/include/aidge/operator/Reshape.hpp @@ -23,25 +23,28 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { + class Reshape_OpImpl : public 
OperatorImpl { public: - Reshape_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} + Reshape_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} void forward() override; }; -enum class ReshapeAttr { Shape, AllowZero }; - class Reshape_Op : public OperatorTensor, public Registrable<Reshape_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Reshape_Op&)>> { +public: +enum class mAttr { Shape, AllowZero }; + public: static const std::string Type; private: - using Attributes_ = StaticAttributes<ReshapeAttr, + using Attributes_ = StaticAttributes<mAttr, std::vector<std::int64_t>, bool>; - template <ReshapeAttr e> using attr = typename Attributes_::template attr<e>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; public: @@ -59,7 +62,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::Reshape_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool dimsForwarded() const override final; bool forwardDims(bool allowDataDependency = false) override final; @@ -68,8 +71,8 @@ public: std::set<std::string> getAvailableBackends() const override; std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::vector<std::int64_t>& shape() const { return mAttributes->template getAttr<ReshapeAttr::Shape>(); } - inline bool& allowZero() const { return mAttributes->template getAttr<ReshapeAttr::AllowZero>(); } + inline std::vector<std::int64_t>& shape() const { return mAttributes->template getAttr<mAttr::Shape>(); } + inline bool& allowZero() const { return mAttributes->template getAttr<mAttr::AllowZero>(); } static const std::vector<std::string> getInputsName(){ return {"data_input"}; @@ -82,11 +85,13 @@ public: std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {}, bool 
allowzero = false, const std::string &name = ""); -} // namespace Aidge + +} // namespace Operator +} // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = { "shape", "allow_zero" }; +const char *const EnumStrings<Aidge::Operator::Reshape_Op::mAttr>::data[] = { "shape", "allow_zero" }; } #endif /* AIDGE_CORE_OPERATOR_RESHAPE_H_ */ diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp index a48b95aff..e55e640bb 100644 --- a/include/aidge/operator/Resize.hpp +++ b/include/aidge/operator/Resize.hpp @@ -23,6 +23,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { class Resize_Op : public OperatorTensor, public Registrable<Resize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Resize_Op&)>>{ @@ -43,7 +44,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::Resize_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool dimsForwarded() const override final; bool forwardDims(bool allowDataDependency = false) override final; @@ -62,7 +63,8 @@ public: std::shared_ptr<Node> Resize(const std::string &name = ""); -} // namespace Aidge +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_Resize_H_ */ \ No newline at end of file diff --git a/include/aidge/operator/Round.hpp b/include/aidge/operator/Round.hpp index 00352421d..2031b735e 100644 --- a/include/aidge/operator/Round.hpp +++ b/include/aidge/operator/Round.hpp @@ -23,13 +23,12 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { class Round_Op : public OperatorTensor, public Registrable<Round_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Round_Op&)>> { - - public: static const std::string Type; @@ -45,7 +44,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::Round_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; std::set<std::string> getAvailableBackends() const override; @@ -58,7 +57,8 @@ public: }; std::shared_ptr<Node> Round(const std::string& name = ""); -} +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_ROUND_H_ */ diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp index 4ef39f63a..9a0afff93 100644 --- a/include/aidge/operator/Scaling.hpp +++ b/include/aidge/operator/Scaling.hpp @@ -24,19 +24,22 @@ #include "aidge/utils/Types.h" namespace Aidge { -enum class ScalingAttr { - ScalingFactor, QuantizedNbBits, IsOutputUnsigned -}; +namespace Operator { class Scaling_Op : public OperatorTensor, public Registrable<Scaling_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Scaling_Op&)>> { +public: +enum class mAttr { + ScalingFactor, QuantizedNbBits, IsOutputUnsigned +}; + public: static const std::string Type; private: - using Attributes_ = StaticAttributes<ScalingAttr, float, std::size_t, bool>; - template <ScalingAttr e> using attr = typename Attributes_::template attr<e>; + using Attributes_ = StaticAttributes<mAttr, float, std::size_t, bool>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; public: @@ -54,15 +57,15 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::Scaling_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline float& scalingFactor() const noexcept { return mAttributes -> getAttr<ScalingAttr::ScalingFactor>(); } - inline std::size_t& quantizedNbBits() const noexcept { return mAttributes -> getAttr<ScalingAttr::QuantizedNbBits>(); } - inline bool& isOutputUnsigned() const noexcept { return mAttributes -> getAttr<ScalingAttr::IsOutputUnsigned>(); } + inline float& scalingFactor() const noexcept { return mAttributes -> getAttr<mAttr::ScalingFactor>(); } + inline std::size_t& quantizedNbBits() const noexcept { return mAttributes -> getAttr<mAttr::QuantizedNbBits>(); } + inline bool& isOutputUnsigned() const noexcept { return mAttributes -> getAttr<mAttr::IsOutputUnsigned>(); } static const std::vector<std::string> getInputsName() { return {"data_input"}; @@ -81,11 +84,13 @@ std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, std::size_t quantizedNbBits=8, bool isOutputUnsigned=true, const std::string& name = ""); + +} // namespace Operator } // namespace Aidge namespace { template <> -const char* const EnumStrings<Aidge::ScalingAttr>::data[] +const char* const EnumStrings<Aidge::Operator::Scaling_Op::mAttr>::data[] = {"scaling_factor", "quantized_nb_bits", "is_output_unsigned"}; } diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp index cfd43fa0d..d5b586ace 100644 --- a/include/aidge/operator/Shape.hpp +++ b/include/aidge/operator/Shape.hpp @@ -25,25 +25,28 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { + class Shape_OpImpl : public OperatorImpl { public: - Shape_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op,
backend) {} + Shape_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} void forward() override; }; -enum class ShapeAttr { Start, End }; class Shape_Op : public OperatorTensor, public Registrable<Shape_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Shape_Op&)>> { +public: +enum class mAttr { Start, End }; public: static const std::string Type; private: - using Attributes_ = StaticAttributes<ShapeAttr, std::int64_t, std::int64_t>; - template <ShapeAttr e> using attr = typename Attributes_::template attr<e>; + using Attributes_ = StaticAttributes<mAttr, std::int64_t, std::int64_t>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; public: @@ -61,7 +64,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::Shape_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool forwardDims(bool /*allowDataDependency*/ = false) override final; @@ -69,8 +72,8 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::int64_t& start() const noexcept { return mAttributes -> getAttr<ShapeAttr::Start>(); } - inline std::int64_t& end() const noexcept { return mAttributes -> getAttr<ShapeAttr::End>(); } + inline std::int64_t& start() const noexcept { return mAttributes -> getAttr<mAttr::Start>(); } + inline std::int64_t& end() const noexcept { return mAttributes -> getAttr<mAttr::End>(); } static const std::vector<std::string> getInputsName(){ return {"data_input"}; @@ -81,11 +84,13 @@ public: }; std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end = -1, const std::string& name = ""); + +} // namespace Operator } // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"start", 
"end"}; +const char *const EnumStrings<Aidge::Operator::Shape_Op::mAttr>::data[] = {"start", "end"}; } #endif /* AIDGE_CORE_OPERATOR_SHAPE_H_ */ diff --git a/include/aidge/operator/ShiftGELU.hpp b/include/aidge/operator/ShiftGELU.hpp index 30f1d71e0..9c199ba15 100644 --- a/include/aidge/operator/ShiftGELU.hpp +++ b/include/aidge/operator/ShiftGELU.hpp @@ -26,6 +26,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { class ShiftGELU_Op : public OperatorTensor, public Registrable<ShiftGELU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ShiftGELU_Op&)>> { @@ -44,7 +45,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::ShiftGELU_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; @@ -59,6 +60,8 @@ public: }; std::shared_ptr<Node> ShiftGELU(const std::string& name = ""); -} + +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_SHIFTGELU_H_ */ diff --git a/include/aidge/operator/ShiftMax.hpp b/include/aidge/operator/ShiftMax.hpp index 9fbd81aed..6cdea1ba4 100644 --- a/include/aidge/operator/ShiftMax.hpp +++ b/include/aidge/operator/ShiftMax.hpp @@ -26,6 +26,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { class ShiftMax_Op : public OperatorTensor, public Registrable<ShiftMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ShiftMax_Op&)>> { @@ -44,7 +45,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::ShiftMax_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; @@ -59,6 +60,8 @@ public: }; std::shared_ptr<Node> ShiftMax(const std::string& name = ""); -} + +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_SHIFTMAX_H_ */ diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp index 24bc33216..98ab4197a 100644 --- a/include/aidge/operator/Sigmoid.hpp +++ b/include/aidge/operator/Sigmoid.hpp @@ -24,6 +24,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { class Sigmoid_Op : public OperatorTensor, public Registrable<Sigmoid_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Sigmoid_Op&)>> { @@ -34,7 +35,7 @@ public: Sigmoid_Op(const Sigmoid_Op& op); - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; std::set<std::string> getAvailableBackends() const override; @@ -48,6 +49,8 @@ public: }; std::shared_ptr<Node> Sigmoid(const std::string& name = ""); -} + +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_SIGMOID_H_ */ \ No newline at end of file diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp index 811402420..9f3391da4 100644 --- a/include/aidge/operator/Slice.hpp +++ b/include/aidge/operator/Slice.hpp @@ -24,22 +24,24 @@ #include "aidge/utils/Types.h" namespace Aidge { - -enum class SliceAttr { Starts, Ends, Axes, Steps }; +namespace Operator { class Slice_Op : public OperatorTensor, public Registrable<Slice_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Slice_Op &)>> { +public: +enum class mAttr { Starts, Ends, Axes, Steps }; + public: static const std::string Type; private: - using Attributes_ = 
StaticAttributes<SliceAttr, + using Attributes_ = StaticAttributes<mAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int8_t>, std::vector<std::int64_t>>; - template <SliceAttr e> using attr = typename Attributes_::template attr<e>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; public: @@ -63,7 +65,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::Slice_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool dimsForwarded() const override final; bool forwardDims(bool allowDataDependency = true) override final; @@ -72,10 +74,10 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::vector<std::int64_t>& starts() const noexcept { return mAttributes -> getAttr<SliceAttr::Starts>(); } - inline std::vector<std::int64_t>& ends() const noexcept { return mAttributes -> getAttr<SliceAttr::Ends>(); } - inline std::vector<std::int8_t>& axes() const noexcept { return mAttributes -> getAttr<SliceAttr::Axes>(); } - inline std::vector<std::int64_t>& steps() const noexcept { return mAttributes -> getAttr<SliceAttr::Steps>(); } + inline std::vector<std::int64_t>& starts() const noexcept { return mAttributes -> getAttr<mAttr::Starts>(); } + inline std::vector<std::int64_t>& ends() const noexcept { return mAttributes -> getAttr<mAttr::Ends>(); } + inline std::vector<std::int8_t>& axes() const noexcept { return mAttributes -> getAttr<mAttr::Axes>(); } + inline std::vector<std::int64_t>& steps() const noexcept { return mAttributes -> getAttr<mAttr::Steps>(); } static const std::vector<std::string> getInputsName(){ return {"data_input", "starts", "ends", "axes", "steps"}; @@ -96,11 +98,13 @@ std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {}, const 
std::vector<std::int8_t>& axes = {}, const std::vector<std::int64_t>& steps = {}, const std::string &name = ""); -} // namespace Aidge + +} // namespace Operator +} // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "starts", "ends", "axes", "steps" }; +const char *const EnumStrings<Aidge::Operator::Slice_Op::mAttr>::data[] = { "starts", "ends", "axes", "steps" }; } #endif /* AIDGE_CORE_OPERATOR_RELU_H_ */ diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp index 72ea56dd6..0a42fdd4b 100644 --- a/include/aidge/operator/Softmax.hpp +++ b/include/aidge/operator/Softmax.hpp @@ -24,19 +24,22 @@ #include "aidge/utils/Types.h" namespace Aidge { -enum class SoftmaxAttr { Axis }; +namespace Operator { + class Softmax_Op : public OperatorTensor, public Registrable<Softmax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Softmax_Op&)>> { +public: +enum class mAttr { Axis }; public: static const std::string Type; private: - using Attributes_ = StaticAttributes<SoftmaxAttr, std::int32_t>; - template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>; + using Attributes_ = StaticAttributes<mAttr, std::int32_t>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; public: @@ -54,14 +57,14 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::Softmax_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<SoftmaxAttr::Axis>(); } + inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<mAttr::Axis>(); } static const std::vector<std::string> getInputsName(){ return {"data_input"}; @@ -72,11 +75,13 @@ public: }; std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = ""); + +} // namespace Operator } // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"axis"}; +const char *const EnumStrings<Aidge::Operator::Softmax_Op::mAttr>::data[] = {"axis"}; } #endif /* AIDGE_CORE_OPERATOR_SOFTMAX_H_ */ diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp index 8c3a111c4..d4624624c 100644 --- a/include/aidge/operator/Split.hpp +++ b/include/aidge/operator/Split.hpp @@ -24,24 +24,27 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { + class Split_OpImpl : public OperatorImpl { public: - Split_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} + Split_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} void forward() override; }; -enum class SplitAttr { Axis, Split }; class Split_Op : public OperatorTensor, public Registrable<Split_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Split_Op &)>> { +public: +enum class mAttr { Axis, Split }; public: static const std::string Type; private: - using Attributes_ = StaticAttributes<SplitAttr, std::int8_t, std::vector<DimSize_t>>; - template <SplitAttr e> using attr =
typename Attributes_::template attr<e>; + using Attributes_ = StaticAttributes<mAttr, std::int8_t, std::vector<DimSize_t>>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; public: @@ -62,7 +65,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::Split_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool dimsForwarded() const override final; bool forwardDims(bool allowDataDependency = false) override final; @@ -71,8 +74,8 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::int8_t& axis() const { return mAttributes->template getAttr<SplitAttr::Axis>(); } - inline std::vector<DimSize_t>& split() const { return mAttributes->template getAttr<SplitAttr::Split>(); } + inline std::int8_t& axis() const { return mAttributes->template getAttr<mAttr::Axis>(); } + inline std::vector<DimSize_t>& split() const { return mAttributes->template getAttr<mAttr::Split>(); } static const std::vector<std::string> getInputsName(){ return {"data_input", "split"}; @@ -92,11 +95,13 @@ std::shared_ptr<Node> Split(DimSize_t nbOutput, std::int8_t axis = 0, const std::vector<DimSize_t>& split = {}, const std::string &name = ""); -} // namespace Aidge + +} // namespace Operator +} // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::SplitAttr>::data[] = { "axis", "split" }; +const char *const EnumStrings<Aidge::Operator::Split_Op::mAttr>::data[] = { "axis", "split" }; } #endif /* AIDGE_CORE_OPERATOR_SPLIT_H_ */ diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp index 4858cdcd1..46d0739b4 100644 --- a/include/aidge/operator/Sqrt.hpp +++ b/include/aidge/operator/Sqrt.hpp @@ -17,11 +17,13 @@ #include <string> #include "aidge/graph/Node.hpp" +#include 
"aidge/operator/Operator.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { class Sqrt_Op : public OperatorTensor, public Registrable<Sqrt_Op, @@ -42,7 +44,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::Sqrt_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; std::set<std::string> getAvailableBackends() const override; @@ -56,6 +58,8 @@ public: }; std::shared_ptr<Node> Sqrt(const std::string& name = ""); -} + +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_SQRT_H_ */ diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp index 64a775eb4..bc31b5a22 100644 --- a/include/aidge/operator/Squeeze.hpp +++ b/include/aidge/operator/Squeeze.hpp @@ -29,6 +29,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { /** * @brief implementation of the operator squeeze. * @note Since this operator implementation is agnostic to the backend it is @@ -36,7 +37,7 @@ namespace Aidge { */ class Squeeze_OpImpl : public OperatorImpl { public: - Squeeze_OpImpl(const Operator &op, const std::string &backend = "") + Squeeze_OpImpl(const AbsOperator &op, const std::string &backend = "") : OperatorImpl(op, backend) {} void forward() override; }; @@ -110,7 +111,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::MatMul_Op */ - std::shared_ptr<Operator> clone() const override final { + std::shared_ptr<AbsOperator> clone() const override final { return std::make_shared<Squeeze_Op>(*this); } @@ -150,6 +151,7 @@ inline std::shared_ptr<Node> Squeeze(const std::vector<int8_t> axes = {}, const std::string &name = "") { return std::make_shared<Node>(std::make_shared<Squeeze_Op>(axes), name); } +} // namespace Operator } // namespace Aidge namespace { diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp index 170baf6fd..c1d7ed032 100644 --- a/include/aidge/operator/Sub.hpp +++ b/include/aidge/operator/Sub.hpp @@ -23,6 +23,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { class Sub_Op : public OperatorTensor, public Registrable<Sub_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Sub_Op&)>> { @@ -42,7 +43,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::Sub_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool forwardDims(bool allowDataDependency = false) override final; @@ -60,6 +61,7 @@ public: std::shared_ptr<Node> Sub(const std::string& name = ""); +} // namespace Operator } // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_SUB_H_ */ diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp index f1a30e3f0..abc4296f3 100644 --- a/include/aidge/operator/Tanh.hpp +++ b/include/aidge/operator/Tanh.hpp @@ -22,6 +22,7 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { class Tanh_Op : public OperatorTensor, public Registrable<Tanh_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Tanh_Op&)>> { @@ -40,7 +41,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::Tanh_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; @@ -55,6 +56,8 @@ public: }; std::shared_ptr<Node> Tanh(const std::string& name = ""); -} + +} // namespace Operator +} // namespace Aidge #endif /* AIDGE_CORE_OPERATOR_TANH_H_ */ \ No newline at end of file diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp index 155627f2c..447807497 100644 --- a/include/aidge/operator/Transpose.hpp +++ b/include/aidge/operator/Transpose.hpp @@ -25,26 +25,29 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { + class TransposeImpl : public OperatorImpl { public: - TransposeImpl(const Operator& op, const std::string& backend = "") + TransposeImpl(const AbsOperator& op, const std::string& backend = "") : OperatorImpl(op, backend) {} void forward() override; }; -enum class TransposeAttr { OutputDimsOrder }; class Transpose_Op : public OperatorTensor, public Registrable<Transpose_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Transpose_Op&)>> { +public: +enum class mAttr { OutputDimsOrder }; public: static const std::string Type; private: - using Attributes_ = StaticAttributes<TransposeAttr, std::vector<DimSize_t>>; - template <TransposeAttr e> using attr = typename Attributes_::template attr<e>; + using Attributes_ = StaticAttributes<mAttr, std::vector<DimSize_t>>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; public: @@ -62,7 +65,7 @@ public: * @brief Clone the operator using its copy-constructor. 
* @see Operator::Transpose_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool forwardDims(bool /*allowDataDependency*/ = false) override final; @@ -70,7 +73,7 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::vector<DimSize_t>& outputDimsOrder() const noexcept { return mAttributes -> getAttr<TransposeAttr::OutputDimsOrder>(); } + inline std::vector<DimSize_t>& outputDimsOrder() const noexcept { return mAttributes -> getAttr<mAttr::OutputDimsOrder>(); } static const std::vector<std::string> getInputsName(){ return {"data_input"}; @@ -82,11 +85,13 @@ public: std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder, const std::string& name = ""); -} // namespace Aidge + +} // namespace Operator +} // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"output_dims_order"}; +const char *const EnumStrings<Aidge::Operator::Transpose_Op::mAttr>::data[] = {"output_dims_order"}; } #endif /* AIDGE_CORE_OPERATOR_TRANSPOSE_H_ */ diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp index 09a689528..f43579e60 100644 --- a/include/aidge/operator/Unfold.hpp +++ b/include/aidge/operator/Unfold.hpp @@ -30,28 +30,31 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { + template <DimIdx_t DIM> class Unfold_OpImpl : public OperatorImpl { public: - Unfold_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} + Unfold_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} void forward() override; }; -enum class UnfoldAttr { StrideDims, DilationDims, KernelDims }; template <DimIdx_t DIM> class Unfold_Op : public OperatorTensor, public Registrable<Unfold_Op<DIM>, std::string, 
std::function<std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM> &)>> { +public: +enum class mAttr { StrideDims, DilationDims, KernelDims }; public: static const std::string Type; private: - using Attributes_ = StaticAttributes<UnfoldAttr, + using Attributes_ = StaticAttributes<mAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>>; - template <UnfoldAttr e> using attr = typename Attributes_::template attr<e>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; public: @@ -72,7 +75,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::Unfold_Op */ - std::shared_ptr<Operator> clone() const override; + std::shared_ptr<AbsOperator> clone() const override; bool forwardDims(bool /*allowDataDependency*/ = false) override final; @@ -80,9 +83,9 @@ public: std::set<std::string> getAvailableBackends() const override; inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } - inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<UnfoldAttr::StrideDims>(); } - inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<UnfoldAttr::DilationDims>(); } - inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<UnfoldAttr::KernelDims>(); } + inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<mAttr::StrideDims>(); } + inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<mAttr::DilationDims>(); } + inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<mAttr::KernelDims>(); } static const std::vector<std::string> getInputsName(){ return {"data_input"}; @@ -107,13 +110,15 @@ inline std::shared_ptr<Node> Unfold( static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not 
supported"); return Unfold(to_array(kernelDims), name, strideDims, dilationDims); } -} // namespace Aidge + +} // namespace Operator +} // namespace Aidge extern template class Aidge::Unfold_Op<2>; namespace { template <> -const char *const EnumStrings<Aidge::UnfoldAttr>::data[] = { +const char *const EnumStrings<Aidge::Operator::Unfold_Op<2>::mAttr>::data[] = { "stride_dims", "dilation_dims", "kernel_dims" diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp index c07105405..2756581f9 100644 --- a/include/aidge/operator/Unsqueeze.hpp +++ b/include/aidge/operator/Unsqueeze.hpp @@ -26,6 +26,8 @@ #include "aidge/utils/Types.h" namespace Aidge { +namespace Operator { + /** * @brief implementation of the operator unsqueeze. * @note Since this operator implementation is agnostic to the backend it is @@ -33,20 +35,11 @@ namespace Aidge { */ class Unsqueeze_OpImpl : public OperatorImpl { public: - Unsqueeze_OpImpl(const Operator &op, const std::string &backend = "") + Unsqueeze_OpImpl(const AbsOperator &op, const std::string &backend = "") : OperatorImpl(op, backend) {} void forward() override; }; -enum class UnsqueezeAttr { - /** - * @brief vector of axes to unsqueeze. - * values must be comprised within - * [ -a ; a-1 ] - * with a = input_tensor.nbDim() + dims_to_unsqueeze.size() - */ - Axes -}; /** * @brief This operator has as purpose to add a dummy dimension around given @@ -61,14 +54,24 @@ class Unsqueeze_Op : public OperatorTensor, public Registrable<Unsqueeze_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Unsqueeze_Op &)>> { +public: +enum class mAttr { + /** + * @brief vector of axes to unsqueeze. 
+ * values must be comprised within + * [ -a ; a-1 ] + * with a = input_tensor.nbDim() + dims_to_unsqueeze.size() + */ + Axes +}; public: static const std::string Type; // name of the type of the operation (Here "Unsqueeze") private: - using Attributes_ = StaticAttributes<UnsqueezeAttr, std::vector<int8_t>>; - template <UnsqueezeAttr e> + using Attributes_ = StaticAttributes<mAttr, std::vector<int8_t>>; + template <mAttr e> using attr = typename Attributes_::template attr<e>; const std::shared_ptr<Attributes_> mAttributes; @@ -84,7 +87,7 @@ public: : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, 1), mAttributes( - std::make_shared<Attributes_>(attr<UnsqueezeAttr::Axes>(axes))) { + std::make_shared<Attributes_>(attr<mAttr::Axes>(axes))) { mImpl = std::make_shared<Unsqueeze_OpImpl>(*this); } @@ -107,7 +110,7 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::MatMul_Op */ - std::shared_ptr<Operator> clone() const override final { + std::shared_ptr<AbsOperator> clone() const override final { return std::make_shared<Unsqueeze_Op>(*this); } @@ -131,7 +134,7 @@ public: * with : a = input_tensor.nbDim() + dims_to_unsqueeze.size() */ inline std::vector<int8_t> &axes() const noexcept { - return mAttributes->template getAttr<UnsqueezeAttr::Axes>(); + return mAttributes->template getAttr<mAttr::Axes>(); } static const std::vector<std::string> getInputsName() { @@ -148,11 +151,13 @@ inline std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes = {}, const std::string &name = "") { return std::make_shared<Node>(std::make_shared<Unsqueeze_Op>(axes), name); } + +} // namespace Operator } // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"Axes"}; +const char *const EnumStrings<Aidge::Operator::Unsqueeze_Op::mAttr>::data[] = {"Axes"}; } #endif // AIDGE_CORE_OPERATOR_UNSQUEEZE_H_ diff --git a/include/aidge/recipes/Recipes.hpp 
b/include/aidge/recipes/Recipes.hpp index a9b9213e9..5b0f8c6bd 100644 --- a/include/aidge/recipes/Recipes.hpp +++ b/include/aidge/recipes/Recipes.hpp @@ -152,7 +152,7 @@ size_t convToMatMul(std::shared_ptr<GraphView> graph); /** * @brief Adapt a graph to the available kernels of a backend. - * + * * @param graph Graph to manipulate */ void adaptToBackend(std::shared_ptr<GraphView> graph); diff --git a/include/aidge/scheduler/MemoryManager.hpp b/include/aidge/scheduler/MemoryManager.hpp index 2e397d1db..c3b11e19e 100644 --- a/include/aidge/scheduler/MemoryManager.hpp +++ b/include/aidge/scheduler/MemoryManager.hpp @@ -20,21 +20,21 @@ namespace Aidge { /** - * @brief The MemoryManager can be used to generate an optimized static memory + * @brief The MemoryManager can be used to generate an optimized static memory * layout for a computing graph in a global memory space. * The are some assumptions: - * - A MemoryManager represents a single global memory space, filled with + * - A MemoryManager represents a single global memory space, filled with * contiguous, non-overlapping MemorySpace chunks. * - A MemorySpace contains one or multiple MemoryPlane, each MemoryPlane * corresponding to the allocation of a specific Tensor. When a Tensor can re- * use the memory of the preceding one (for in-place or partially in-place - * operators), multiple overlapping MemoryPlane can be created in the same + * operators), multiple overlapping MemoryPlane can be created in the same * MemorySpace (remember, MemorySpace **cannot** be overlapping!). * - A MemoryPlane is tailored for handling (N)HWC data with two properties: * - Possibility of wrapping: on the H axis (each W*C block is contiguous). * - Possibility of concatenation: on the C axis (C1+C2+...+Cn). 
* - All the sizes and offets specified in a MemoryManager are expressed in - * number of data elements, or **words**, meaning currently a uniform data + * number of data elements, or **words**, meaning currently a uniform data * precision is expected in a MemoryManager (for instance, if the precision is * 16-bits, each data element will be 2 bytes, which will be the size of a word). */ @@ -79,15 +79,15 @@ public: * MemoryPlane can be non-contiguous (in case of stride, or wrapping, when * offset + size > memSpace.size). * MemoryPlane cannot be re-arranged inside a MemorySpace. - * + * * A MemoryPlane is tailored for handling (N)HWC data with two properties: * - Possibility of wrapping: on the H axis (each W*C block is contiguous). * - Possibility of concatenation: on the C axis (C1+C2+...+Cn). - * + * * Detail of (N)HWC data handling: * - \p length is the size of contiguous and non-breakable memory line (W in HWC); * - \p count is the number of memory lines of size \p length constituting a memory block (H in HWC); - * - \p stride is the number of channels, or memory blocks, *in total*, + * - \p stride is the number of channels, or memory blocks, *in total*, * of \p count lines of size \p length (C in NHWC); * - \p size is the number of channels, or memory blocks, *in this MemoryPlane*, * of \p count lines of size \p length. @@ -98,7 +98,7 @@ public: * (with an additionnal relative offset of +C1) * In this mode, wrapping can only occur on the H (\p count) axis. W*C chunks * are garanteed to be contiguous (\p length * \p stride). - * + * * By default, \p stride = \p size, \p count = 1 and \p length = 1, meaning * there is no NHWC layout and the MemoryPlane can be wrapped **anywhere**. * In this case, \p size is the total size of the MemoryPlane (H*W*C, in words). @@ -140,7 +140,7 @@ public: /** * @brief Get the total size of the MemoryPlane, including the stride. 
- * + * * @return unsigned int Total size in words */ inline unsigned int getSize() const { @@ -150,7 +150,7 @@ public: /** * @brief Get the useful size of the MemoryPlane, as if its memory blocks * were contiguous, without stride. - * + * * @return unsigned int Useful size in words */ inline unsigned int getUsefulSize() const { @@ -159,7 +159,7 @@ public: /** * @brief Get the absolute offset of the beginning of the memory plane. - * + * * @return unsigned int Contiguous offset in words */ inline unsigned int getContiguousOffset() const { @@ -171,7 +171,7 @@ public: * its beginning to the limit of the MemorySpace size. * If the MemoryPlane fill the MemorySpace without wrapping, the contiguous * size will be the same as the total size of the MemoryPlane. - * + * * @return unsigned int Contiguous size in words */ inline unsigned int getContiguousSize() const { @@ -183,7 +183,7 @@ public: * Since the wrapped part of the memory plane begins at the beginning of * the MemorySpace, the returned offset is always the same as the MemorySpace * offset. - * + * * @return unsigned int Wrapped offset in words */ inline unsigned int getWrappedOffset() const { @@ -196,7 +196,7 @@ public: * including the stride. * If the MemoryPlane fill the MemorySpace without wrapping, the wrapped * size will 0. - * + * * @return unsigned int Wrapped size in words */ inline unsigned int getWrappedSize() const { @@ -207,7 +207,7 @@ public: * @brief Get the absolute offset after the end of the memory plane (if it * is wrapped, the offset will correspond to the end of the wrapped part). * The word at the final offset is not included in the MemoryPlane. - * + * * @return unsigned int Final offset in words */ inline unsigned int getFinalOffset() const { @@ -220,7 +220,7 @@ public: * @brief Get the absolute offset after the end of the contiguous part * of the memory plane. * The word at the upper offset is not included in the MemoryPlane. 
- * + * * @return unsigned int Upper offset in words */ inline unsigned int getUpperOffset() const { @@ -264,7 +264,7 @@ public: /// of \p count lines of size \p length (the C in NHWC). /// There should be C blocks of H*W size. unsigned int stride; - /// Size of an elementary, contiguous and non-breakable, memory line + /// Size of an elementary, contiguous and non-breakable, memory line /// (the W in NHWC), in words. A MemoryPlane wrapping cannot occur in /// the middle of a memory line. unsigned int length; diff --git a/include/aidge/scheduler/ProdConso.hpp b/include/aidge/scheduler/ProdConso.hpp index a7c0ed5ae..5bb37f3e8 100644 --- a/include/aidge/scheduler/ProdConso.hpp +++ b/include/aidge/scheduler/ProdConso.hpp @@ -19,17 +19,17 @@ #include "aidge/data/Elts.hpp" namespace Aidge { -class Operator; +class AbsOperator; class ProdConso { public: - ProdConso(const Operator& op, bool inPlace = false); + ProdConso(const AbsOperator& op, bool inPlace = false); - static std::unique_ptr<ProdConso> defaultModel(const Operator& op) { + static std::unique_ptr<ProdConso> defaultModel(const AbsOperator& op) { return std::make_unique<ProdConso>(op, false); } - static std::unique_ptr<ProdConso> inPlaceModel(const Operator& op) { + static std::unique_ptr<ProdConso> inPlaceModel(const AbsOperator& op) { return std::make_unique<ProdConso>(op, true); } @@ -79,7 +79,7 @@ public: virtual ~ProdConso() = default; protected: - const Operator &mOp; + const AbsOperator &mOp; const bool mInPlace; std::vector<Elts_t> mNbConsumedData; std::vector<Elts_t> mNbProducedData; diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp index 52056852b..630441b06 100644 --- a/include/aidge/utils/DynamicAttributes.hpp +++ b/include/aidge/utils/DynamicAttributes.hpp @@ -352,7 +352,7 @@ struct DynamicAttributes::AnyUtils<py::object> : public DynamicAttributes::AnyUt size_t hash(const future_std::any& attr) const override final { // Here we are mixing Python and 
C++ hashes... if both are - // well implemented, this should not increase the collision + // well implemented, this should not increase the collision // probability for the same number of stored hashes. return py::hash(future_std::any_cast<py::object>(attr)); } diff --git a/include/aidge/utils/Random.hpp b/include/aidge/utils/Random.hpp index 73cbd1453..741eb93f4 100644 --- a/include/aidge/utils/Random.hpp +++ b/include/aidge/utils/Random.hpp @@ -15,8 +15,8 @@ #include <algorithm> #include <random> #include <vector> -namespace Aidge { +namespace Aidge { namespace Random { /** @@ -55,7 +55,7 @@ inline void randShuffle(std::vector<unsigned int>& vec) { std::shuffle(vec.begin(), vec.end(), Aidge::Random::Generator::get()); } -} // namespace Random -} // namespace Aidge +} // namespace Random +} // namespace Aidge #endif // AIDGE_RANDOM_H_ diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp index 636863e29..8abb46b05 100644 --- a/include/aidge/utils/StaticAttributes.hpp +++ b/include/aidge/utils/StaticAttributes.hpp @@ -317,7 +317,7 @@ private: return false; } - + template<std::size_t I = 0, typename... 
Tp> inline typename std::enable_if<I == sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& /*t*/, std::map<std::string, future_std::any>& /*attrs*/) const {} diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp index 04172c3ff..07ba7a18a 100644 --- a/python_binding/backend/pybind_OperatorImpl.cpp +++ b/python_binding/backend/pybind_OperatorImpl.cpp @@ -84,7 +84,7 @@ void init_OperatorImpl(py::module& m){ ; py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr()) - .def(py::init<const Operator&, const std::string&>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>()) + .def(py::init<const AbsOperator&, const std::string&>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>()) .def("forward", &OperatorImpl::forward) .def("backward", &OperatorImpl::backward) .def("prod_conso", &OperatorImpl::prodConso) diff --git a/python_binding/data/pybind_DataProvider.cpp b/python_binding/data/pybind_DataProvider.cpp index 77abd1f39..3735ce55f 100644 --- a/python_binding/data/pybind_DataProvider.cpp +++ b/python_binding/data/pybind_DataProvider.cpp @@ -31,6 +31,6 @@ void init_DataProvider(py::module& m){ .def("__iter__", &DataProvider::iter) .def("__next__", &DataProvider::next) .def("__len__", &DataProvider::getNbBatch); - + } } diff --git a/python_binding/data/pybind_Database.cpp b/python_binding/data/pybind_Database.cpp index 4bc28a19d..79db4d24e 100644 --- a/python_binding/data/pybind_Database.cpp +++ b/python_binding/data/pybind_Database.cpp @@ -37,4 +37,4 @@ void init_Database(py::module& m) { .def("len", &Database::getLen) .def("get_nb_modalities", &Database::getNbModalities); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/data/pybind_TensorImpl.cpp b/python_binding/data/pybind_TensorImpl.cpp index 4c664274e..ef925a52f 100644 --- a/python_binding/data/pybind_TensorImpl.cpp +++ 
b/python_binding/data/pybind_TensorImpl.cpp @@ -29,7 +29,7 @@ void init_TensorImpl(py::module& m){ py::class_<TensorImpl_cpu<double>, std::shared_ptr<TensorImpl_cpu<double>>, TensorImpl>(m, "TensorImpl_cpu_float64") .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>()); - + py::class_<TensorImpl_cpu<float>, std::shared_ptr<TensorImpl_cpu<float>>, TensorImpl>(m, "TensorImpl_cpu_float32") .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>()); diff --git a/python_binding/filler/pybind_Filler.cpp b/python_binding/filler/pybind_Filler.cpp index a85c0d6cd..1f2068914 100644 --- a/python_binding/filler/pybind_Filler.cpp +++ b/python_binding/filler/pybind_Filler.cpp @@ -144,4 +144,4 @@ void init_Filler(py::module &m) { py::arg("meanNorm") = 0.0, py::arg("scaling") = 1.0) ; } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp index cd9b2a16f..567a1e2fa 100644 --- a/python_binding/graph/pybind_GraphView.cpp +++ b/python_binding/graph/pybind_GraphView.cpp @@ -148,9 +148,9 @@ void init_GraphView(py::module& m) { // }) .def("get_ranked_nodes", &GraphView::getRankedNodes) .def("set_dataformat", &GraphView::setDataFormat, py::arg("dataformat")) - + ; m.def("get_connected_graph_view", &getConnectedGraphView); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp index d8e77bb25..442c167d7 100644 --- a/python_binding/graph/pybind_Node.cpp +++ b/python_binding/graph/pybind_Node.cpp @@ -23,7 +23,7 @@ namespace py = pybind11; namespace Aidge { void init_Node(py::module& m) { py::class_<Node, std::shared_ptr<Node>>(m, "Node") - .def(py::init<std::shared_ptr<Operator>, const std::string&>(), py::arg("op"), py::arg("name") = "") + .def(py::init<std::shared_ptr<AbsOperator>, const std::string&>(), py::arg("op"), py::arg("name") = "") .def("name", &Node::name, R"mydelimiter( Name of the Node. 
@@ -36,7 +36,7 @@ void init_Node(py::module& m) { .def("get_operator", &Node::getOperator, R"mydelimiter( - Get the Operator object of the Node. + Get the AbsOperator object of the Node. )mydelimiter") .def("set_name", &Node::setName, py::arg("name"), @@ -48,7 +48,7 @@ void init_Node(py::module& m) { :rtype: str )mydelimiter") - .def("create_unique_name", &Node::createUniqueName, py::arg("base_name"), + .def("create_unique_name", &Node::createUniqueName, py::arg("base_name"), R"mydelimiter( Given a base name, generate a new name which is unique in all the GraphViews containing this node. @@ -190,4 +190,4 @@ void init_Node(py::module& m) { return self(connectors); }); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_And.cpp b/python_binding/operator/pybind_And.cpp index 08dddfc81..56b52b26e 100644 --- a/python_binding/operator/pybind_And.cpp +++ b/python_binding/operator/pybind_And.cpp @@ -31,4 +31,4 @@ void init_And(py::module& m) { :param name : name of the node. )mydelimiter"); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_ArgMax.cpp b/python_binding/operator/pybind_ArgMax.cpp index 3de54afd7..4ad5cb6fb 100644 --- a/python_binding/operator/pybind_ArgMax.cpp +++ b/python_binding/operator/pybind_ArgMax.cpp @@ -30,13 +30,13 @@ void init_ArgMax(py::module &m) { m, pyClassName.c_str(), py::multiple_inheritance(), R"mydelimiter( Initialize an ArgMax operator. - :param axis: The axis along which to compute the max element. The accepted range is [-r, r-1], + :param axis: The axis along which to compute the max element. The accepted range is [-r, r-1], where r is the rank of the input tensor. :type axis: int - :param keepdims: If True (default), retains the reduced dimensions with size 1. If False, + :param keepdims: If True (default), retains the reduced dimensions with size 1. If False, the reduced dimensions are removed. 
:type keepdims: bool - :param select_last_index: If True, selects the last index if there are multiple occurrences + :param select_last_index: If True, selects the last index if there are multiple occurrences of the max value. If False (default), selects the first occurrence. :type select_last_index: bool )mydelimiter") @@ -57,13 +57,13 @@ void init_ArgMax(py::module &m) { py::arg("name") = "", R"mydelimiter( Initialize a node containing an ArgMax operator. - :param axis: The axis along which to compute the max element. The accepted range is [-r, r-1], + :param axis: The axis along which to compute the max element. The accepted range is [-r, r-1], where r is the rank of the input tensor. :type axis: int - :param keepdims: If True (default), retains the reduced dimensions with size 1. If False, + :param keepdims: If True (default), retains the reduced dimensions with size 1. If False, the reduced dimensions are removed. :type keepdims: bool - :param select_last_index: If True, selects the last index if there are multiple occurrences + :param select_last_index: If True, selects the last index if there are multiple occurrences of the max value. If False (default), selects the first occurrence. :type select_last_index: bool :param name : name of the node. 
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp index 9a1bdacd1..c449fcd58 100644 --- a/python_binding/operator/pybind_BatchNorm.cpp +++ b/python_binding/operator/pybind_BatchNorm.cpp @@ -42,4 +42,4 @@ void init_BatchNorm(py::module &m) { declare_BatchNormOp<2>(m); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_BitShift.cpp b/python_binding/operator/pybind_BitShift.cpp index b4f6c90e5..43f85947a 100644 --- a/python_binding/operator/pybind_BitShift.cpp +++ b/python_binding/operator/pybind_BitShift.cpp @@ -25,8 +25,8 @@ void init_BitShift(py::module &m) { // Binding for BitShiftOp class auto pyBitShiftOp = py::class_<BitShift_Op, std::shared_ptr<BitShift_Op>, OperatorTensor>(m, "BitShiftOp", py::multiple_inheritance(),R"mydelimiter( BitShiftOp is a tensor operator that performs bitwise shifts on tensor elements. - This class allows shifting tensor values either to the left or right based on the - specified direction. The direction can be accessed and controlled using the + This class allows shifting tensor values either to the left or right based on the + specified direction. The direction can be accessed and controlled using the BitShiftDirection enum. :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right) :type direction: BitShiftDirection @@ -47,8 +47,8 @@ void init_BitShift(py::module &m) { m.def("BitShift", &BitShift, py::arg("direction") = BitShift_Op::BitShiftDirection::right, py::arg("name") = "", R"mydelimiter( BitShiftOp is a tensor operator that performs bitwise shifts on tensor elements. - This class allows shifting tensor values either to the left or right based on the - specified direction. The direction can be accessed and controlled using the + This class allows shifting tensor values either to the left or right based on the + specified direction. 
The direction can be accessed and controlled using the BitShiftDirection enum. :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right) :type direction: BitShiftDirection diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp index 854f3783e..29f937155 100644 --- a/python_binding/operator/pybind_Concat.cpp +++ b/python_binding/operator/pybind_Concat.cpp @@ -33,4 +33,4 @@ void init_Concat(py::module& m) { m.def("Concat", &Concat, py::arg("nb_inputs"), py::arg("axis"), py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp index d2ad60725..17dcb558d 100644 --- a/python_binding/operator/pybind_Div.cpp +++ b/python_binding/operator/pybind_Div.cpp @@ -28,4 +28,4 @@ void init_Div(py::module& m) { m.def("Div", &Div, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp index 6ca25f956..581adf986 100644 --- a/python_binding/operator/pybind_Erf.cpp +++ b/python_binding/operator/pybind_Erf.cpp @@ -29,4 +29,4 @@ void init_Erf(py::module& m) { m.def("Erf", &Erf, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp index 0aac0bbad..70ea1cb25 100644 --- a/python_binding/operator/pybind_Gather.cpp +++ b/python_binding/operator/pybind_Gather.cpp @@ -37,4 +37,4 @@ void init_Gather(py::module& m) { m.def("Gather", &Gather, py::arg("axis") = 0, py::arg("indices") = std::vector<std::int64_t>(), py::arg("gathered_shape") = std::vector<std::size_t>(), py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp index 6af8fef88..96d00543d 100644 --- 
a/python_binding/operator/pybind_GenericOperator.cpp +++ b/python_binding/operator/pybind_GenericOperator.cpp @@ -65,4 +65,4 @@ void init_GenericOperator(py::module& m) { return genericNode; }, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"), py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Identity.cpp b/python_binding/operator/pybind_Identity.cpp index 759919722..ac7491976 100644 --- a/python_binding/operator/pybind_Identity.cpp +++ b/python_binding/operator/pybind_Identity.cpp @@ -28,4 +28,4 @@ void init_Identity(py::module& m) { m.def("Identity", &Identity, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp index e031d3dfb..b0fa40646 100644 --- a/python_binding/operator/pybind_LeakyReLU.cpp +++ b/python_binding/operator/pybind_LeakyReLU.cpp @@ -28,4 +28,4 @@ void init_LeakyReLU(py::module& m) { m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Ln.cpp b/python_binding/operator/pybind_Ln.cpp index 50aa75582..ae17f6910 100755 --- a/python_binding/operator/pybind_Ln.cpp +++ b/python_binding/operator/pybind_Ln.cpp @@ -27,4 +27,4 @@ void init_Ln(py::module& m) { m.def("Ln", &Ln, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Memorize.cpp b/python_binding/operator/pybind_Memorize.cpp index 3ac112211..5d50f12eb 100644 --- a/python_binding/operator/pybind_Memorize.cpp +++ b/python_binding/operator/pybind_Memorize.cpp @@ -30,4 +30,4 @@ void init_Memorize(py::module& m) { m.def("Memorize", &Memorize, py::arg("end_step"), py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Mul.cpp 
b/python_binding/operator/pybind_Mul.cpp index 23949b5fe..9052f169e 100644 --- a/python_binding/operator/pybind_Mul.cpp +++ b/python_binding/operator/pybind_Mul.cpp @@ -27,4 +27,4 @@ void init_Mul(py::module& m) { declare_registrable<Mul_Op>(m, "MulOp"); m.def("Mul", &Mul, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp index e22f88687..a999df0b7 100644 --- a/python_binding/operator/pybind_Operator.cpp +++ b/python_binding/operator/pybind_Operator.cpp @@ -34,35 +34,35 @@ void init_Operator(py::module& m){ .value("OptionalData", InputCategory::OptionalData) .value("OptionalParam", InputCategory::OptionalParam); - py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator") - .def("__repr__", &Operator::repr) - .def("backend", &Operator::backend) - .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput, py::const_), py::arg("outputIdx"), py::arg("data")) - .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data")) - .def("get_raw_output", &Operator::getRawOutput, py::arg("outputIdx")) - .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data")) - .def("get_raw_input", &Operator::getRawInput, py::arg("inputIdx")) - .def("nb_inputs", &Operator::nbInputs) - .def("nb_outputs", &Operator::nbOutputs) - .def("input_category", &Operator::inputCategory, py::arg("idx"), + py::class_<AbsOperator, std::shared_ptr<AbsOperator>>(m, "AbsOperator") + .def("__repr__", &AbsOperator::repr) + .def("backend", &AbsOperator::backend) + .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&AbsOperator::setOutput, py::const_), py::arg("outputIdx"), py::arg("data")) + .def("set_input", py::overload_cast<const IOIndex_t, 
const std::shared_ptr<Data>&>(&AbsOperator::setInput), py::arg("inputIdx"), py::arg("data")) + .def("get_raw_output", &AbsOperator::getRawOutput, py::arg("outputIdx")) + .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&AbsOperator::setInput), py::arg("inputIdx"), py::arg("data")) + .def("get_raw_input", &AbsOperator::getRawInput, py::arg("inputIdx")) + .def("nb_inputs", &AbsOperator::nbInputs) + .def("nb_outputs", &AbsOperator::nbOutputs) + .def("input_category", &AbsOperator::inputCategory, py::arg("idx"), R"mydelimiter( Category of a specific input (Data or Param, optional or not). Data inputs exclude inputs expecting parameters (weights or bias). :rtype: InputCategory )mydelimiter") - .def("associate_input", &Operator::associateInput, py::arg("inputIdx"), py::arg("data")) - .def("set_datatype", &Operator::setDataType, py::arg("dataType")) - .def("set_backend", py::overload_cast<const std::string&, DeviceIdx_t>(&Operator::setBackend), py::arg("name"), py::arg("device") = 0) - .def("set_backend", py::overload_cast<const std::vector<std::pair<std::string, DeviceIdx_t>>&>(&Operator::setBackend), py::arg("backends")) - .def("forward", &Operator::forward) - // py::keep_alive forbide Python to garbage collect the implementation lambda as long as the Operator is not deleted ! 
- .def("set_impl", &Operator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>()) - .def("type", &Operator::type) - .def("get_impl", &Operator::getImpl) - .def_property_readonly("attr", &Operator::attributes) - .def("set_back_edges", &Operator::setBackEdges, py::arg("input_indexes")) - .def("is_back_edge", &Operator::isBackEdge, py::arg("input_index")) + .def("associate_input", &AbsOperator::associateInput, py::arg("inputIdx"), py::arg("data")) + .def("set_datatype", &AbsOperator::setDataType, py::arg("dataType")) + .def("set_backend", py::overload_cast<const std::string&, DeviceIdx_t>(&AbsOperator::setBackend), py::arg("name"), py::arg("device") = 0) + .def("set_backend", py::overload_cast<const std::vector<std::pair<std::string, DeviceIdx_t>>&>(&AbsOperator::setBackend), py::arg("backends")) + .def("forward", &AbsOperator::forward) + // py::keep_alive forbide Python to garbage collect the implementation lambda as long as the AbsOperator is not deleted ! + .def("set_impl", &AbsOperator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>()) + .def("type", &AbsOperator::type) + .def("get_impl", &AbsOperator::getImpl) + .def_property_readonly("attr", &AbsOperator::attributes) + .def("set_back_edges", &AbsOperator::setBackEdges, py::arg("input_indexes")) + .def("is_back_edge", &AbsOperator::isBackEdge, py::arg("input_index")) ; } } diff --git a/python_binding/operator/pybind_OperatorTensor.cpp b/python_binding/operator/pybind_OperatorTensor.cpp index 8c515e321..b140f89b8 100644 --- a/python_binding/operator/pybind_OperatorTensor.cpp +++ b/python_binding/operator/pybind_OperatorTensor.cpp @@ -24,7 +24,7 @@ namespace py = pybind11; namespace Aidge { void init_OperatorTensor(py::module& m){ - py::class_<OperatorTensor, std::shared_ptr<OperatorTensor>, Operator>(m, "OperatorTensor") + py::class_<OperatorTensor, std::shared_ptr<OperatorTensor>, AbsOperator>(m, "OperatorTensor") .def("get_output", &OperatorTensor::getOutput, py::arg("outputIdx")) 
.def("get_input", &OperatorTensor::getInput, py::arg("inputIdx")) diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp index 2040f642b..0279d44b8 100644 --- a/python_binding/operator/pybind_Pop.cpp +++ b/python_binding/operator/pybind_Pop.cpp @@ -27,4 +27,4 @@ void init_Pop(py::module& m) { m.def("Pop", &Pop, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp index ec29e3faa..2d75f9822 100644 --- a/python_binding/operator/pybind_Pow.cpp +++ b/python_binding/operator/pybind_Pow.cpp @@ -28,4 +28,4 @@ void init_Pow(py::module& m) { m.def("Pow", &Pow, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp index 79720845c..c60bf4f00 100644 --- a/python_binding/operator/pybind_ReLU.cpp +++ b/python_binding/operator/pybind_ReLU.cpp @@ -28,4 +28,4 @@ void init_ReLU(py::module& m) { m.def("ReLU", &ReLU, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp index 028e45755..70323c2ba 100644 --- a/python_binding/operator/pybind_ReduceMean.cpp +++ b/python_binding/operator/pybind_ReduceMean.cpp @@ -30,13 +30,13 @@ void declare_ReduceMeanOp(py::module &m) { m, pyClassName.c_str(), py::multiple_inheritance(), R"mydelimiter( Initialize a ReduceMean operator. - :param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], + :param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], where r is the rank of the input tensor. :type axes: List[int] - :param keepdims: If True (default), retains the reduced dimensions with size 1. If False, + :param keepdims: If True (default), retains the reduced dimensions with size 1. If False, the reduced dimensions are removed. 
:type keepdims: bool - :param noop_with_empty_axes: If True, the operator just copies the input, + :param noop_with_empty_axes: If True, the operator just copies the input, if False, the operatpr reduces all the dimensions. :type noop_with_empty_axes: bool )mydelimiter") @@ -60,13 +60,13 @@ void declare_ReduceMeanOp(py::module &m) { py::arg("name") = "", R"mydelimiter( Initialize a node containing a ReduceMean operator. - :param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], + :param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], where r is the rank of the input tensor. :type axes: List[int] - :param keepdims: If True (default), retains the reduced dimensions with size 1. If False, + :param keepdims: If True (default), retains the reduced dimensions with size 1. If False, the reduced dimensions are removed. :type keepdims: bool - :param noop_with_empty_axes: If True, the operator just copies the input, + :param noop_with_empty_axes: If True, the operator just copies the input, if False, the operatpr reduces all the dimensions. :type noop_with_empty_axes: bool :param name : name of the node. diff --git a/python_binding/operator/pybind_ReduceSum.cpp b/python_binding/operator/pybind_ReduceSum.cpp index eaa57ef1c..7ff6e1508 100644 --- a/python_binding/operator/pybind_ReduceSum.cpp +++ b/python_binding/operator/pybind_ReduceSum.cpp @@ -30,13 +30,13 @@ void init_ReduceSum(py::module &m) { m, pyClassName.c_str(), py::multiple_inheritance(), R"mydelimiter( Initialize a ReduceMean operator. - :param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], + :param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], where r is the rank of the input tensor. :type axes: List[int] - :param keepdims: If True (default), retains the reduced dimensions with size 1. If False, + :param keepdims: If True (default), retains the reduced dimensions with size 1. 
If False, the reduced dimensions are removed. :type keepdims: bool - :param noop_with_empty_axes: If True, the operator just copies the input, + :param noop_with_empty_axes: If True, the operator just copies the input, if False, the operatpr reduces all the dimensions. :type noop_with_empty_axes: bool )mydelimiter") @@ -57,13 +57,13 @@ void init_ReduceSum(py::module &m) { py::arg("name") = "", R"mydelimiter( Initialize a node containing a ReduceMean operator. - :param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], + :param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], where r is the rank of the input tensor. :type axes: List[int] - :param keepdims: If True (default), retains the reduced dimensions with size 1. If False, + :param keepdims: If True (default), retains the reduced dimensions with size 1. If False, the reduced dimensions are removed. :type keepdims: bool - :param noop_with_empty_axes: If True, the operator just copies the input, + :param noop_with_empty_axes: If True, the operator just copies the input, if False, the operatpr reduces all the dimensions. :type noop_with_empty_axes: bool :param name : name of the node. 
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp index c0b0e8c30..9c05a29fd 100644 --- a/python_binding/operator/pybind_Reshape.cpp +++ b/python_binding/operator/pybind_Reshape.cpp @@ -27,4 +27,4 @@ void init_Reshape(py::module& m) { declare_registrable<Reshape_Op>(m, "ReshapeOp"); m.def("Reshape", &Reshape, py::arg("shape") = std::vector<std::int64_t>(), py::arg("allowzero") = false, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp index 35321f525..755ec6078 100644 --- a/python_binding/operator/pybind_Resize.cpp +++ b/python_binding/operator/pybind_Resize.cpp @@ -27,4 +27,4 @@ void init_Resize(py::module& m) { m.def("Resize", &Resize, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Round.cpp b/python_binding/operator/pybind_Round.cpp index e9ed0e473..743dc2fcb 100644 --- a/python_binding/operator/pybind_Round.cpp +++ b/python_binding/operator/pybind_Round.cpp @@ -26,11 +26,11 @@ void init_Round(py::module& m) { declare_registrable<Round_Op>(m, "RoundOp"); m.def("Round", &Round, py::arg("name") = "", R"mydelimiter( RoundOp is a tensor operator that rounds the values of a tensor element-wise. - This class rounds each value to the nearest integer. In the case of halves, + This class rounds each value to the nearest integer. In the case of halves, the rule is to round them to the nearest even integer. :param X: input tensor. :type X: tensor of type float, double, float16, or bfloat16. :param Y: output tensor with the same shape and type as the input tensor. 
)mydelimiter"); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp index 22e8011a9..25c5560c2 100644 --- a/python_binding/operator/pybind_Scaling.cpp +++ b/python_binding/operator/pybind_Scaling.cpp @@ -30,4 +30,4 @@ void init_Scaling(py::module& m) m.def("Scaling", &Scaling, py::arg("scaling_factor") = 1.0f, py::arg("nb_bits") = 8, py::arg("is_output_unsigned") = true, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp index b3511f31e..c00d7f916 100644 --- a/python_binding/operator/pybind_Shape.cpp +++ b/python_binding/operator/pybind_Shape.cpp @@ -34,4 +34,4 @@ void init_Shape(py::module& m) { m.def("Shape", &Shape, py::arg("start") = 0, py::arg("end") = -1, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp index db7fc7bfb..b2053234a 100644 --- a/python_binding/operator/pybind_Sigmoid.cpp +++ b/python_binding/operator/pybind_Sigmoid.cpp @@ -27,4 +27,4 @@ void init_Sigmoid(py::module& m) { m.def("Sigmoid", &Sigmoid, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp index c8cae2592..8c52c01e1 100644 --- a/python_binding/operator/pybind_Slice.cpp +++ b/python_binding/operator/pybind_Slice.cpp @@ -42,4 +42,4 @@ void init_Slice(py::module& m) { py::arg("steps") = std::vector<std::int64_t>(), py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp index 3b98ab9df..c5cd6a234 100644 --- a/python_binding/operator/pybind_Softmax.cpp +++ b/python_binding/operator/pybind_Softmax.cpp @@ -28,4 +28,4 @@ void 
init_Softmax(py::module& m) { declare_registrable<Softmax_Op>(m, "SoftmaxOp"); m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Split.cpp b/python_binding/operator/pybind_Split.cpp index 9b3feda9f..dd56ab78b 100644 --- a/python_binding/operator/pybind_Split.cpp +++ b/python_binding/operator/pybind_Split.cpp @@ -35,4 +35,4 @@ void init_Split(py::module& m) { m.def("Split", &Split, py::arg("nb_outputs"), py::arg("axis") = 0, py::arg("split") = std::vector<DimSize_t>(), py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp index ba0c5aab0..6eacacaaf 100644 --- a/python_binding/operator/pybind_Sqrt.cpp +++ b/python_binding/operator/pybind_Sqrt.cpp @@ -26,4 +26,4 @@ void init_Sqrt(py::module& m) { declare_registrable<Sqrt_Op>(m, "SqrtOp"); m.def("Sqrt", &Sqrt, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Squeeze.cpp b/python_binding/operator/pybind_Squeeze.cpp index ca90fb46a..5826797da 100644 --- a/python_binding/operator/pybind_Squeeze.cpp +++ b/python_binding/operator/pybind_Squeeze.cpp @@ -27,7 +27,7 @@ void init_Squeeze(py::module &m) { m, "SqueezeOp", py::multiple_inheritance(), R"mydelimiter( Initialize squeeze operator - :param axes : axes to squeeze between [-r;r-1] + :param axes : axes to squeeze between [-r;r-1] with r = input_tensor.nbDims() & r in [-128 , 127] :type axes : :py:class: List[Int] @@ -42,7 +42,7 @@ void init_Squeeze(py::module &m) { py::arg("name") = "", R"mydelimiter( Initialize a node containing a squeeze operator. 
- :param axes : axes to squeeze between [-r;r-1] + :param axes : axes to squeeze between [-r;r-1] with r = input_tensor.nbDims() & r in [-128 , 127] :type axes : :py:class: List[Int] diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp index 52a622f0f..f692b12b7 100644 --- a/python_binding/operator/pybind_Sub.cpp +++ b/python_binding/operator/pybind_Sub.cpp @@ -27,4 +27,4 @@ void init_Sub(py::module& m) { declare_registrable<Sub_Op>(m, "SubOp"); m.def("Sub", &Sub, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Tanh.cpp b/python_binding/operator/pybind_Tanh.cpp index ded15ee78..7c802eb7d 100644 --- a/python_binding/operator/pybind_Tanh.cpp +++ b/python_binding/operator/pybind_Tanh.cpp @@ -27,4 +27,4 @@ void init_Tanh(py::module& m) { m.def("Tanh", &Tanh, py::arg("name") = ""); } -} // namespace Aidge +} // namespace Aidge diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp index 40c179c40..6c63d66cb 100644 --- a/python_binding/operator/pybind_Unsqueeze.cpp +++ b/python_binding/operator/pybind_Unsqueeze.cpp @@ -24,7 +24,7 @@ void init_Unsqueeze(py::module &m) { m, "UnsqueezeOp", py::multiple_inheritance(), R"mydelimiter( Initialize an unsqueeze operator. - :param axes : axes to unsqueeze between [-r;r-1] + :param axes : axes to unsqueeze between [-r;r-1] with r = input_tensor.nbDims() + len(axes) :type axes : :py:class: List[Int] )mydelimiter") @@ -39,7 +39,7 @@ void init_Unsqueeze(py::module &m) { py::arg("name") = "", R"mydelimiter( Initialize a node containing an unsqueeze operator. - :param axes : axes to unsqueeze between [-r;r-1] + :param axes : axes to unsqueeze between [-r;r-1] with r = input_tensor.nbDims() + len(axes) :type axes : :py:class: List[Int] :param name : name of the node. 
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp index bac071e02..27fe28936 100644 --- a/python_binding/pybind_core.cpp +++ b/python_binding/pybind_core.cpp @@ -172,6 +172,6 @@ void init_Aidge(py::module& m) { init_Filler(m); } -} // namespace Aidge +} // namespace Aidge PYBIND11_MODULE(aidge_core, m) { Aidge::init_Aidge(m); } diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp index 6908cbd91..5db376ba6 100644 --- a/python_binding/recipes/pybind_Recipes.cpp +++ b/python_binding/recipes/pybind_Recipes.cpp @@ -113,7 +113,7 @@ void init_Recipes(py::module &m) )mydelimiter"); m.def("fuse_to_metaops", fuseToMetaOps, py::arg("graph_view"), py::arg("query"), py::arg("type") = "", R"mydelimiter( - Fuse each sub-graph matching a query in a Meta Operator. + Fuse each sub-graph matching a query in a Meta Operator. :param graph_view: Graph view on which we want to apply the recipe :type graph_view: :py:class:`aidge_core.GraphView` @@ -121,7 +121,7 @@ void init_Recipes(py::module &m) :type query: str :param type: Type name of the resulting meta operators :type type: str, optional - :return: Number of sub-graph actually fused in a Meta Operator. + :return: Number of sub-graph actually fused in a Meta Operator.
:rtype: int )mydelimiter"); diff --git a/python_binding/scheduler/pybind_MemoryManager.cpp b/python_binding/scheduler/pybind_MemoryManager.cpp index 0f18db405..9e5bd6601 100644 --- a/python_binding/scheduler/pybind_MemoryManager.cpp +++ b/python_binding/scheduler/pybind_MemoryManager.cpp @@ -36,10 +36,10 @@ void init_MemoryManager(py::module& m) .def_readwrite("released", &MemoryManager::MemorySpace::released); py::class_<MemoryManager::MemoryPlane, std::shared_ptr<MemoryManager::MemoryPlane>>(m, "MemoryPlane") - .def(py::init<std::shared_ptr<MemoryManager::MemorySpace>, + .def(py::init<std::shared_ptr<MemoryManager::MemorySpace>, MemoryManager::Clock_T, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int>(), - py::arg("mem_space"), py::arg("clock"), py::arg("offset"), + py::arg("mem_space"), py::arg("clock"), py::arg("offset"), py::arg("size"), py::arg("stride"), py::arg("length"), py::arg("count")) .def_readwrite("mem_space", &MemoryManager::MemoryPlane::memSpace) .def_readwrite("allocated", &MemoryManager::MemoryPlane::allocated) diff --git a/python_binding/scheduler/pybind_ProdConso.cpp b/python_binding/scheduler/pybind_ProdConso.cpp index abd6d5379..863fd4a39 100644 --- a/python_binding/scheduler/pybind_ProdConso.cpp +++ b/python_binding/scheduler/pybind_ProdConso.cpp @@ -101,7 +101,7 @@ public: void init_ProdConso(py::module& m){ py::class_<ProdConso, std::shared_ptr<ProdConso>, pyProdConso>(m, "ProdConso", py::dynamic_attr()) - .def(py::init<const Operator&, bool>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>()) + .def(py::init<const AbsOperator&, bool>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>()) .def_static("default_model", &ProdConso::defaultModel) .def_static("in_place_model", &ProdConso::inPlaceModel) .def("get_nb_required_data", &ProdConso::getNbRequiredData) diff --git a/python_binding/utils/pybind_Log.cpp b/python_binding/utils/pybind_Log.cpp index ca8d1f330..663611319 100644 --- 
a/python_binding/utils/pybind_Log.cpp +++ b/python_binding/utils/pybind_Log.cpp @@ -78,13 +78,13 @@ void init_Log(py::module& m){ .def_static("set_console_level", &Log::setConsoleLevel, py::arg("level"), R"mydelimiter( Set the minimum log level displayed in the console. - Available `Level`s in ascending order : + Available `Level`s in ascending order : - Level.Debug - Level.Info - Level.Notice - Level.Warn - Level.Error - - Level.Fatal + - Level.Fatal :param level: Log level. :type level: Level @@ -100,13 +100,13 @@ void init_Log(py::module& m){ .def_static("set_file_level", &Log::setFileLevel, py::arg("level"), R"mydelimiter( Set the minimum log level saved in the log file. - Available `Level`s in ascending order : + Available `Level`s in ascending order : - Level.Debug - Level.Info - Level.Notice - Level.Warn - Level.Error - - Level.Fatal + - Level.Fatal :param level: Log level. :type level: Level diff --git a/python_binding/utils/pybind_Random.cpp b/python_binding/utils/pybind_Random.cpp index a1956d2d1..1a112317f 100644 --- a/python_binding/utils/pybind_Random.cpp +++ b/python_binding/utils/pybind_Random.cpp @@ -21,4 +21,4 @@ void init_Random(py::module &m) { py::class_<Random::Generator>(mRand, "Generator") .def_static("set_seed", Random::Generator::setSeed); } -} // namespace Aidge +} // namespace Aidge diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp index e2215e704..0c99ba650 100644 --- a/src/backend/OperatorImpl.cpp +++ b/src/backend/OperatorImpl.cpp @@ -33,7 +33,7 @@ Aidge::ImplSpec::ImplSpec(const std::vector<IOSpec>& i, const std::vector<IOSpec Aidge::ImplSpec::ImplSpec(const Aidge::ImplSpec&) = default; Aidge::ImplSpec::~ImplSpec() noexcept = default; -Aidge::OperatorImpl::OperatorImpl(const Operator& op, const std::string& backend): +Aidge::OperatorImpl::OperatorImpl(const AbsOperator& op, const std::string& backend): mOp(op), mBackend(backend) { diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp index c19eab12a..e967691a1 
100644 --- a/src/graph/Node.cpp +++ b/src/graph/Node.cpp @@ -19,7 +19,7 @@ #include "aidge/operator/Producer.hpp" #include "aidge/utils/Types.h" -Aidge::Node::Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttributes> attrs) +Aidge::Node::Node(std::shared_ptr<AbsOperator> op, std::shared_ptr<DynamicAttributes> attrs) : mAttrs(attrs), mOperator(op), mParents(std::vector<std::shared_ptr<Node>>(static_cast<std::size_t>(op->nbInputs()), @@ -38,10 +38,10 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttribute } } -Aidge::Node::Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs) +Aidge::Node::Node(std::shared_ptr<AbsOperator> op, const DynamicAttributes& attrs) : Node(op, std::make_shared<DynamicAttributes>(attrs)) {} -Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name) +Aidge::Node::Node(std::shared_ptr<AbsOperator> op, const std::string& name) : Node(op, DynamicAttributes()) { // ctor @@ -415,7 +415,7 @@ Aidge::NodePtr Aidge::Node::cloneSharedOperators() const { } Aidge::NodePtr Aidge::Node::cloneSharedProducers() const { - std::shared_ptr<Operator> op = + std::shared_ptr<AbsOperator> op = (mOperator->type() == Producer_Op::Type) ? 
mOperator : mOperator->clone(); return std::make_shared<Node>(op, mAttrs); diff --git a/src/graphRegex/matchFsm/FsmGraph.cpp b/src/graphRegex/matchFsm/FsmGraph.cpp index a56474e04..5e9b79ecb 100644 --- a/src/graphRegex/matchFsm/FsmGraph.cpp +++ b/src/graphRegex/matchFsm/FsmGraph.cpp @@ -10,7 +10,7 @@ FsmGraph::FsmGraph(const std::string query):mQuery(query){ //TODO std::vector<std::shared_ptr<MatchSolution>> FsmGraph::test(const std::vector<NodePtr>& startNodes){ - + std::vector<std::shared_ptr<Aidge::FsmNode>> startNodesFsm = getStartNodes(); if(startNodes.size() != startNodesFsm.size()){ throw std::runtime_error("bad number of Start nodes"); @@ -61,7 +61,7 @@ FsmGraph::FsmGraph(const std::string query):mQuery(query){ walks.swap(nextWalks); nextWalks.clear(); } - + MatchResult allMatch(allValidContext,getNbSubFsm(),mQuery,startNodes); return allMatch.getSolutions(); diff --git a/src/nodeTester/ConditionalInterpreter.cpp b/src/nodeTester/ConditionalInterpreter.cpp index f40e62305..7d3700079 100644 --- a/src/nodeTester/ConditionalInterpreter.cpp +++ b/src/nodeTester/ConditionalInterpreter.cpp @@ -28,16 +28,16 @@ using namespace Aidge; ConditionalParser conditionalParser = ConditionalParser(ConditionalExpressions); mTree = conditionalParser.parse(); - + ///lambda by default mLambdaRegister.insert("getType",+[](NodePtr NodeOp){return NodeOp->type();}); } - + bool ConditionalInterpreter::isLambdaRegister(const std::string &key){ return mLambdaRegister.isLambdaRegister(key); } - + const std::string& ConditionalInterpreter::getKey(){ return mKey; } @@ -48,7 +48,7 @@ using namespace Aidge; mResolution.clear(); try{ std::vector< std::shared_ptr<ConditionalData>> r = visit({mTree},nodeOp); - + if (mResolution.size() != 1){ throw std::runtime_error("Multi output interpretation output"); }else{ @@ -179,7 +179,7 @@ using namespace Aidge; }catch(const std::exception& e){ std::ostringstream errorMessage; errorMessage << "Error in visiting AST for node "<< nodeOp->name() << 
"\n\t" << e.what() << "\n"; - throw std::runtime_error(errorMessage.str()); + throw std::runtime_error(errorMessage.str()); } } @@ -236,11 +236,11 @@ using namespace Aidge; if (mResolution.size() < 2){ throw std::runtime_error("EQ need 2 arg and get :" + std::to_string(mResolution.size())); } - auto a = mResolution.back(); + auto a = mResolution.back(); mResolution.pop_back(); - auto b = mResolution.back(); + auto b = mResolution.back(); mResolution.pop_back(); - + if (a->getType() != b->getType()){ throw std::runtime_error("EQ Unsupported between type :" + a->getType() +" "+ b->getType()); @@ -262,7 +262,7 @@ using namespace Aidge; throw std::runtime_error("EQ Unknown type encountered :" + a->getType() ); } - + mResolution.push_back(data); } @@ -271,9 +271,9 @@ using namespace Aidge; if (mResolution.size() < 2){ throw std::runtime_error("NEQ need 2 arg and get :" + std::to_string(mResolution.size())); } - auto a = mResolution.back(); + auto a = mResolution.back(); mResolution.pop_back(); - auto b = mResolution.back(); + auto b = mResolution.back(); mResolution.pop_back(); if (a->getType() != b->getType()){ @@ -293,7 +293,7 @@ using namespace Aidge; throw std::runtime_error("NEQ Unknown type encountered :" + a->getType() ); } - + mResolution.push_back(data); } @@ -302,9 +302,9 @@ using namespace Aidge; if (mResolution.size() < 2){ throw std::runtime_error("AND need 2 arg and get :" + std::to_string(mResolution.size())); } - auto a = mResolution.back(); + auto a = mResolution.back(); mResolution.pop_back(); - auto b = mResolution.back(); + auto b = mResolution.back(); mResolution.pop_back(); @@ -316,7 +316,7 @@ using namespace Aidge; data->setValue<bool>( a->getValue<bool>() && b->getValue<bool>()); - + mResolution.push_back(data); } @@ -325,9 +325,9 @@ using namespace Aidge; if (mResolution.size() < 2){ throw std::runtime_error("OR need 2 arg and get :" + std::to_string(mResolution.size())); } - auto a = mResolution.back(); + auto a = mResolution.back(); 
mResolution.pop_back(); - auto b = mResolution.back(); + auto b = mResolution.back(); mResolution.pop_back(); @@ -339,7 +339,7 @@ using namespace Aidge; data->setValue<bool>( a->getValue<bool>() || b->getValue<bool>()); - + mResolution.push_back(data); } @@ -348,7 +348,7 @@ using namespace Aidge; if (mResolution.size() < 1){ throw std::runtime_error("NOT need 1 arg and get :" + std::to_string(mResolution.size())); } - auto a = mResolution.back(); + auto a = mResolution.back(); mResolution.pop_back(); if (a->getType() != typeid(bool).name()){ @@ -358,7 +358,7 @@ using namespace Aidge; std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>(); data->setValue<bool>( !a->getValue<bool>() ); - + mResolution.push_back(data); } diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp index 033c476c8..86fd1ad9c 100644 --- a/src/operator/Add.cpp +++ b/src/operator/Add.cpp @@ -40,7 +40,7 @@ Aidge::Add_Op::Add_Op(const Add_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Add_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Add_Op::clone() const { return std::make_shared<Add_Op>(*this); } diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp index f8c8e5e3f..46b2295d3 100644 --- a/src/operator/AvgPooling.cpp +++ b/src/operator/AvgPooling.cpp @@ -40,7 +40,7 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op) } template <Aidge::DimIdx_t DIM> -std::shared_ptr<Aidge::Operator> Aidge::AvgPooling_Op<DIM>::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::AvgPooling_Op<DIM>::clone() const { return std::make_shared<AvgPooling_Op<DIM>>(*this); } diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp index bcf3b29c4..263221247 100644 --- a/src/operator/BatchNorm.cpp +++ b/src/operator/BatchNorm.cpp @@ -39,7 +39,7 @@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op) } template <Aidge::DimIdx_t DIM> -std::shared_ptr<Aidge::Operator> Aidge::BatchNorm_Op<DIM>::clone() const 
{ +std::shared_ptr<Aidge::AbsOperator> Aidge::BatchNorm_Op<DIM>::clone() const { return std::make_shared<BatchNorm_Op<DIM>>(*this); } diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp index 55efdd51d..5448ebf58 100644 --- a/src/operator/Concat.cpp +++ b/src/operator/Concat.cpp @@ -43,7 +43,7 @@ Aidge::Concat_Op::Concat_Op(const Aidge::Concat_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Concat_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Concat_Op::clone() const { return std::make_shared<Concat_Op>(*this); } diff --git a/src/operator/DepthToSpace.cpp b/src/operator/DepthToSpace.cpp index 6b8d05625..23ca0ae72 100644 --- a/src/operator/DepthToSpace.cpp +++ b/src/operator/DepthToSpace.cpp @@ -79,7 +79,7 @@ Aidge::DepthToSpace_Op::DepthToSpace_Op(const Aidge::DepthToSpace_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::DepthToSpace_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::DepthToSpace_Op::clone() const { return std::make_shared<DepthToSpace_Op>(*this); } diff --git a/src/operator/Erf.cpp b/src/operator/Erf.cpp index bd5f76f8a..e7196c16e 100644 --- a/src/operator/Erf.cpp +++ b/src/operator/Erf.cpp @@ -29,7 +29,7 @@ Aidge::Erf_Op::Erf_Op(const Aidge::Erf_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Erf_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Erf_Op::clone() const { return std::make_shared<Erf_Op>(*this); } diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp index dd3ed7aba..0c171a9ea 100644 --- a/src/operator/FC.cpp +++ b/src/operator/FC.cpp @@ -23,7 +23,7 @@ const std::string Aidge::FC_Op::Type = "FC"; -std::shared_ptr<Aidge::Operator> Aidge::FC_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::FC_Op::clone() const { return std::make_shared<FC_Op>(*this); } diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp index 99ccb7505..e97c0c5c3 100644 --- a/src/operator/Fold.cpp +++ b/src/operator/Fold.cpp @@ -40,7 +40,7 @@ Aidge::Fold_Op<DIM>::Fold_Op(const 
Aidge::Fold_Op<DIM> &op) } template <Aidge::DimIdx_t DIM> -std::shared_ptr<Aidge::Operator> Aidge::Fold_Op<DIM>::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Fold_Op<DIM>::clone() const { return std::make_shared<Fold_Op<DIM>>(*this); } diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp index 0ebc3e3bc..46533f06d 100644 --- a/src/operator/Gather.cpp +++ b/src/operator/Gather.cpp @@ -46,7 +46,7 @@ Aidge::Gather_Op::Gather_Op(const Aidge::Gather_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Gather_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Gather_Op::clone() const { return std::make_shared<Gather_Op>(*this); } diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp index 0f90a5a58..a65da81dd 100644 --- a/src/operator/GenericOperator.cpp +++ b/src/operator/GenericOperator.cpp @@ -51,7 +51,7 @@ Aidge::GenericOperator_Op::GenericOperator_Op(const Aidge::GenericOperator_Op& o Aidge::GenericOperator_Op::~GenericOperator_Op() noexcept = default; -std::shared_ptr<Aidge::Operator> Aidge::GenericOperator_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::GenericOperator_Op::clone() const { return std::make_shared<GenericOperator_Op>(*this); } diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp index bbcfd0d28..5f091c629 100644 --- a/src/operator/GlobalAveragePooling.cpp +++ b/src/operator/GlobalAveragePooling.cpp @@ -31,7 +31,7 @@ Aidge::GlobalAveragePooling_Op::GlobalAveragePooling_Op(const Aidge::GlobalAvera } } -std::shared_ptr<Aidge::Operator> Aidge::GlobalAveragePooling_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::GlobalAveragePooling_Op::clone() const { return std::make_shared<GlobalAveragePooling_Op>(*this); } diff --git a/src/operator/GridSample.cpp b/src/operator/GridSample.cpp index d26679f83..bc7d60ec0 100644 --- a/src/operator/GridSample.cpp +++ b/src/operator/GridSample.cpp @@ -54,7 +54,7 @@ 
Aidge::GridSample_Op::GridSample_Op(const Aidge::GridSample_Op& other) Aidge::GridSample_Op::~GridSample_Op() noexcept = default; -std::shared_ptr<Aidge::Operator> Aidge::GridSample_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::GridSample_Op::clone() const { return std::make_shared<GridSample_Op>(*this); } diff --git a/src/operator/Identity.cpp b/src/operator/Identity.cpp index f0b8720bc..677c50ecc 100644 --- a/src/operator/Identity.cpp +++ b/src/operator/Identity.cpp @@ -34,7 +34,7 @@ Aidge::Identity_Op::Identity_Op(const Aidge::Identity_Op& op) mImpl = std::make_shared<Identity_OpImpl>(*this, op.backend()); } -std::shared_ptr<Aidge::Operator> Aidge::Identity_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Identity_Op::clone() const { return std::make_shared<Identity_Op>(*this); } diff --git a/src/operator/LeakyReLU.cpp b/src/operator/LeakyReLU.cpp index dea73f310..37c4948be 100644 --- a/src/operator/LeakyReLU.cpp +++ b/src/operator/LeakyReLU.cpp @@ -29,7 +29,7 @@ Aidge::LeakyReLU_Op::LeakyReLU_Op(const Aidge::LeakyReLU_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::LeakyReLU_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::LeakyReLU_Op::clone() const { return std::make_shared<LeakyReLU_Op>(*this); } diff --git a/src/operator/Ln.cpp b/src/operator/Ln.cpp index 90ae8d8c7..dff7f2899 100755 --- a/src/operator/Ln.cpp +++ b/src/operator/Ln.cpp @@ -29,7 +29,7 @@ Aidge::Ln_Op::Ln_Op(const Aidge::Ln_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Ln_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Ln_Op::clone() const { return std::make_shared<Ln_Op>(*this); } diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp index 668ffd04b..1f830bb82 100644 --- a/src/operator/MatMul.cpp +++ b/src/operator/MatMul.cpp @@ -30,7 +30,7 @@ Aidge::MatMul_Op::MatMul_Op(const Aidge::MatMul_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::MatMul_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> 
Aidge::MatMul_Op::clone() const { return std::make_shared<MatMul_Op>(*this); } diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp index 5ce137fe6..cb72f70f8 100644 --- a/src/operator/MaxPooling.cpp +++ b/src/operator/MaxPooling.cpp @@ -46,7 +46,7 @@ Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const Aidge::MaxPooling_Op<DIM>& op) } template <Aidge::DimIdx_t DIM> -std::shared_ptr<Aidge::Operator> Aidge::MaxPooling_Op<DIM>::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::MaxPooling_Op<DIM>::clone() const { return std::make_shared<MaxPooling_Op<DIM>>(*this); } diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp index 61239071a..344811cc1 100644 --- a/src/operator/Memorize.cpp +++ b/src/operator/Memorize.cpp @@ -99,7 +99,7 @@ Aidge::Memorize_Op::Memorize_Op(const Aidge::Memorize_Op& op) mOutputs[1] = mOutputs[0]; } -std::shared_ptr<Aidge::Operator> Aidge::Memorize_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Memorize_Op::clone() const { return std::make_shared<Memorize_Op>(*this); } diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp index ab6bde74f..19f5d8208 100644 --- a/src/operator/MetaOperator.cpp +++ b/src/operator/MetaOperator.cpp @@ -50,7 +50,7 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shar } } -std::shared_ptr<Aidge::Operator> Aidge::MetaOperator_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::MetaOperator_Op::clone() const { return std::make_shared<MetaOperator_Op>(type(), mGraph->clone()); } diff --git a/src/operator/Move.cpp b/src/operator/Move.cpp index adabcd0d3..14b1c0c81 100644 --- a/src/operator/Move.cpp +++ b/src/operator/Move.cpp @@ -36,7 +36,7 @@ Aidge::Move_Op::Move_Op(const Aidge::Move_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Move_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Move_Op::clone() const { return std::make_shared<Move_Op>(*this); } diff --git a/src/operator/Mul.cpp 
b/src/operator/Mul.cpp index 3f163c9d6..f93c9475b 100644 --- a/src/operator/Mul.cpp +++ b/src/operator/Mul.cpp @@ -33,7 +33,7 @@ Aidge::Mul_Op::Mul_Op(const Aidge::Mul_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Mul_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Mul_Op::clone() const { return std::make_shared<Mul_Op>(*this); } diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp index bd09e9d12..104dac621 100644 --- a/src/operator/Operator.cpp +++ b/src/operator/Operator.cpp @@ -20,62 +20,62 @@ #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" -// constexpr Aidge::Operator::Operator(const char* type) +// constexpr Aidge::Operator::AbsOperator(const char* type) // : mType(type) // { // // ctor // } -Aidge::Operator::~Operator() noexcept = default; +Aidge::AbsOperator::~AbsOperator() noexcept = default; /////////////////////////////////////////////////////// // IMPLEMENTATION /////////////////////////////////////////////////////// -Aidge::Elts_t Aidge::Operator::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const { +Aidge::Elts_t Aidge::AbsOperator::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const { AIDGE_ASSERT(mImpl != nullptr, "getNbRequiredData(): an implementation is required for {}!", type()); return mImpl->prodConso()->getNbRequiredData(inputIdx); } -Aidge::Elts_t Aidge::Operator::getNbRequiredProtected(const Aidge::IOIndex_t inputIdx) const { +Aidge::Elts_t Aidge::AbsOperator::getNbRequiredProtected(const Aidge::IOIndex_t inputIdx) const { AIDGE_ASSERT(mImpl != nullptr, "getNbRequiredProtected(): an implementation is required for {}!", type()); return mImpl->prodConso()->getNbRequiredProtected(inputIdx); } -Aidge::Elts_t Aidge::Operator::getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const { +Aidge::Elts_t Aidge::AbsOperator::getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const { AIDGE_ASSERT(mImpl != 
nullptr, "getRequiredMemory(): an implementation is required for {}!", type()); return mImpl->prodConso()->getRequiredMemory(outputIdx, inputsSize); } -Aidge::Elts_t Aidge::Operator::getNbConsumedData(Aidge::IOIndex_t inputIdx) const { +Aidge::Elts_t Aidge::AbsOperator::getNbConsumedData(Aidge::IOIndex_t inputIdx) const { AIDGE_ASSERT(mImpl != nullptr, "getNbConsumedData(): an implementation is required for {}!", type()); return mImpl->prodConso()->getNbConsumedData(inputIdx); } -Aidge::Elts_t Aidge::Operator::getNbProducedData(Aidge::IOIndex_t outputIdx) const { +Aidge::Elts_t Aidge::AbsOperator::getNbProducedData(Aidge::IOIndex_t outputIdx) const { AIDGE_ASSERT(mImpl != nullptr, "getNbProducedData(): an implementation is required for {}!", type()); return mImpl->prodConso()->getNbProducedData(outputIdx); } -void Aidge::Operator::updateConsummerProducer(){ +void Aidge::AbsOperator::updateConsummerProducer(){ AIDGE_ASSERT(mImpl != nullptr, "updateConsummerProducer(): an implementation is required for {}!", type()); mImpl->prodConso()->updateConsummerProducer(); } -void Aidge::Operator::resetConsummerProducer(){ +void Aidge::AbsOperator::resetConsummerProducer(){ AIDGE_ASSERT(mImpl != nullptr, "resetConsummerProducer(): an implementation is required for {}!", type()); mImpl->prodConso()->resetConsummerProducer(); } -void Aidge::Operator::forward() { +void Aidge::AbsOperator::forward() { AIDGE_ASSERT(mImpl != nullptr, "forward(): an implementation is required for {}!", type()); mImpl->forward(); } -void Aidge::Operator::backward() { +void Aidge::AbsOperator::backward() { AIDGE_ASSERT(mImpl != nullptr, "backward(): an implementation is required for {}!", type()); mImpl->backward(); } -void Aidge::Operator::setBackend(const std::vector<std::pair<std::string, DeviceIdx_t>>& backends) { +void Aidge::AbsOperator::setBackend(const std::vector<std::pair<std::string, DeviceIdx_t>>& backends) { const auto& availableBackends = getAvailableBackends(); // By default, try to set 
the last backend anyway auto selectedBackend = backends.back(); diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp index ff6fb9ce4..d5f079453 100644 --- a/src/operator/OperatorTensor.cpp +++ b/src/operator/OperatorTensor.cpp @@ -21,7 +21,7 @@ Aidge::OperatorTensor::OperatorTensor(const std::string& type, const std::vector<InputCategory>& inputsCategory, const IOIndex_t nbOut) -: Operator(type, inputsCategory, nbOut, OperatorType::Tensor), +: AbsOperator(type, inputsCategory, nbOut, OperatorType::Tensor), mInputs(std::vector<std::shared_ptr<Tensor>>(inputsCategory.size(), nullptr)), mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) { for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) { @@ -32,7 +32,7 @@ Aidge::OperatorTensor::OperatorTensor(const std::string& type, Aidge::OperatorTensor::OperatorTensor(const OperatorTensor& other) - : Operator(other), + : AbsOperator(other), mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)), mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) { for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) { @@ -203,5 +203,5 @@ void Aidge::OperatorTensor::forward() { forwardDims(true); } - Operator::forward(); + AbsOperator::forward(); } diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp index 39f61e328..505f1cf71 100644 --- a/src/operator/Pad.cpp +++ b/src/operator/Pad.cpp @@ -23,7 +23,7 @@ template <Aidge::DimIdx_t DIM> const std::string Aidge::Pad_Op<DIM>::Type = "Pad"; template <Aidge::DimIdx_t DIM> -std::shared_ptr<Aidge::Operator> Aidge::Pad_Op<DIM>::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Pad_Op<DIM>::clone() const { return std::make_shared<Pad_Op<DIM>>(*this); } diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp index cd5b18759..9278a056a 100644 --- a/src/operator/Pop.cpp +++ b/src/operator/Pop.cpp @@ -58,7 +58,7 @@ Aidge::Pop_Op::Pop_Op(const Aidge::Pop_Op& op) } } -std::shared_ptr<Aidge::Operator> 
Aidge::Pop_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Pop_Op::clone() const { return std::make_shared<Pop_Op>(*this); } @@ -74,7 +74,7 @@ bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) { } void Aidge::Pop_Op::updateConsummerProducer() { - Operator::updateConsummerProducer(); + AbsOperator::updateConsummerProducer(); mAttributes->template getAttr<PopAttr::ForwardStep>() = 0; } @@ -93,7 +93,7 @@ std::set<std::string> Aidge::Pop_Op::getAvailableBackends() const { } void Aidge::Pop_Op::forward() { - Operator::forward(); + AbsOperator::forward(); ++mAttributes->template getAttr<PopAttr::ForwardStep>(); } diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp index 3d48b88ab..7342447e2 100644 --- a/src/operator/Producer.cpp +++ b/src/operator/Producer.cpp @@ -70,7 +70,7 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Producer_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Producer_Op::clone() const { return std::make_shared<Producer_Op>(*this); } diff --git a/src/operator/ReLU.cpp b/src/operator/ReLU.cpp index bda26fa33..efe418158 100644 --- a/src/operator/ReLU.cpp +++ b/src/operator/ReLU.cpp @@ -29,7 +29,7 @@ Aidge::ReLU_Op::ReLU_Op(const Aidge::ReLU_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::ReLU_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::ReLU_Op::clone() const { return std::make_shared<ReLU_Op>(*this); } diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp index 7935edb05..13896df57 100644 --- a/src/operator/ReduceMean.cpp +++ b/src/operator/ReduceMean.cpp @@ -46,7 +46,7 @@ Aidge::ReduceMean_Op::ReduceMean_Op(const Aidge::ReduceMean_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::ReduceMean_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::ReduceMean_Op::clone() const { return std::make_shared<ReduceMean_Op>(*this); } diff --git a/src/operator/Reshape.cpp 
b/src/operator/Reshape.cpp index 0fa9a6281..070ca5c77 100644 --- a/src/operator/Reshape.cpp +++ b/src/operator/Reshape.cpp @@ -53,7 +53,7 @@ Aidge::Reshape_Op::Reshape_Op(const Aidge::Reshape_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Reshape_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Reshape_Op::clone() const { return std::make_shared<Reshape_Op>(*this); } diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp index 9e5762452..2d3cc6541 100644 --- a/src/operator/Resize.cpp +++ b/src/operator/Resize.cpp @@ -50,7 +50,7 @@ Aidge::Resize_Op::Resize_Op(const Aidge::Resize_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Resize_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Resize_Op::clone() const { return std::make_shared<Resize_Op>(*this); } diff --git a/src/operator/Round.cpp b/src/operator/Round.cpp index ba4eff9d1..8eb8fb5d2 100644 --- a/src/operator/Round.cpp +++ b/src/operator/Round.cpp @@ -32,7 +32,7 @@ Aidge::Round_Op::Round_Op(const Aidge::Round_Op& op) } -std::shared_ptr<Aidge::Operator> Aidge::Round_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Round_Op::clone() const { return std::make_shared<Round_Op>(*this); } diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp index 5ac08cd22..da59f360c 100644 --- a/src/operator/Scaling.cpp +++ b/src/operator/Scaling.cpp @@ -39,7 +39,7 @@ Aidge::Scaling_Op::Scaling_Op(const Aidge::Scaling_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Scaling_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Scaling_Op::clone() const { return std::make_shared<Scaling_Op>(*this); } diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp index 29a9ee625..37a649400 100644 --- a/src/operator/Shape.cpp +++ b/src/operator/Shape.cpp @@ -55,7 +55,7 @@ Aidge::Shape_Op::Shape_Op(const Aidge::Shape_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Shape_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> 
Aidge::Shape_Op::clone() const { return std::make_shared<Shape_Op>(*this); } diff --git a/src/operator/ShiftGELU.cpp b/src/operator/ShiftGELU.cpp index bd229e6cf..53d75d872 100644 --- a/src/operator/ShiftGELU.cpp +++ b/src/operator/ShiftGELU.cpp @@ -33,7 +33,7 @@ Aidge::ShiftGELU_Op::ShiftGELU_Op(const Aidge::ShiftGELU_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::ShiftGELU_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::ShiftGELU_Op::clone() const { return std::make_shared<ShiftGELU_Op>(*this); } diff --git a/src/operator/ShiftMax.cpp b/src/operator/ShiftMax.cpp index 58d4bf461..ccc9fedc1 100644 --- a/src/operator/ShiftMax.cpp +++ b/src/operator/ShiftMax.cpp @@ -37,7 +37,7 @@ Aidge::ShiftMax_Op::ShiftMax_Op(const Aidge::ShiftMax_Op& op) * @brief Clone the operator using its copy-constructor. * @see Operator::ShiftMax_Op */ -std::shared_ptr<Aidge::Operator> Aidge::ShiftMax_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::ShiftMax_Op::clone() const { return std::make_shared<ShiftMax_Op>(*this); } diff --git a/src/operator/Sigmoid.cpp b/src/operator/Sigmoid.cpp index d97f8c523..fc2c26912 100644 --- a/src/operator/Sigmoid.cpp +++ b/src/operator/Sigmoid.cpp @@ -32,7 +32,7 @@ Aidge::Sigmoid_Op::Sigmoid_Op(const Aidge::Sigmoid_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Sigmoid_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Sigmoid_Op::clone() const { return std::make_shared<Sigmoid_Op>(*this); } diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp index 3bdee8c13..a277a5ea7 100644 --- a/src/operator/Slice.cpp +++ b/src/operator/Slice.cpp @@ -57,7 +57,7 @@ Aidge::Slice_Op::Slice_Op(const Aidge::Slice_Op &op) } } -std::shared_ptr<Aidge::Operator> Aidge::Slice_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Slice_Op::clone() const { return std::make_shared<Slice_Op>(*this); } diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp index ad894c5e5..96f8adc61 100644 --- 
a/src/operator/Softmax.cpp +++ b/src/operator/Softmax.cpp @@ -37,7 +37,7 @@ Aidge::Softmax_Op::Softmax_Op(const Aidge::Softmax_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Softmax_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Softmax_Op::clone() const { return std::make_shared<Softmax_Op>(*this); } diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp index e3ed13588..98146b956 100644 --- a/src/operator/Split.cpp +++ b/src/operator/Split.cpp @@ -79,7 +79,7 @@ Aidge::Split_Op::Split_Op(const Aidge::Split_Op &op) } } -std::shared_ptr<Aidge::Operator> Aidge::Split_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Split_Op::clone() const { return std::make_shared<Split_Op>(*this); } diff --git a/src/operator/Sqrt.cpp b/src/operator/Sqrt.cpp index bd3286f09..5f81d29ff 100644 --- a/src/operator/Sqrt.cpp +++ b/src/operator/Sqrt.cpp @@ -32,7 +32,7 @@ Aidge::Sqrt_Op::Sqrt_Op(const Aidge::Sqrt_Op& op) } -std::shared_ptr<Aidge::Operator> Aidge::Sqrt_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Sqrt_Op::clone() const { return std::make_shared<Sqrt_Op>(*this); } diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp index ca7348b3b..111bb1ee3 100644 --- a/src/operator/Sub.cpp +++ b/src/operator/Sub.cpp @@ -34,7 +34,7 @@ Aidge::Sub_Op::Sub_Op(const Aidge::Sub_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Sub_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Sub_Op::clone() const { return std::make_shared<Sub_Op>(*this); } diff --git a/src/operator/Tanh.cpp b/src/operator/Tanh.cpp index fe295ab71..9e4301c32 100644 --- a/src/operator/Tanh.cpp +++ b/src/operator/Tanh.cpp @@ -32,7 +32,7 @@ Aidge::Tanh_Op::Tanh_Op(const Aidge::Tanh_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Tanh_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Tanh_Op::clone() const { return std::make_shared<Tanh_Op>(*this); } diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp index 
0cb1717f1..895a8d3bb 100644 --- a/src/operator/Transpose.cpp +++ b/src/operator/Transpose.cpp @@ -52,7 +52,7 @@ Aidge::Transpose_Op::Transpose_Op(const Aidge::Transpose_Op& op) } } -std::shared_ptr<Aidge::Operator> Aidge::Transpose_Op::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Transpose_Op::clone() const { return std::make_shared<Transpose_Op>(*this); } diff --git a/src/operator/Unfold.cpp b/src/operator/Unfold.cpp index 53b8bd544..3161ffa1e 100644 --- a/src/operator/Unfold.cpp +++ b/src/operator/Unfold.cpp @@ -99,7 +99,7 @@ Aidge::Unfold_Op<DIM>::Unfold_Op(const Aidge::Unfold_Op<DIM> &op) } template <Aidge::DimIdx_t DIM> -std::shared_ptr<Aidge::Operator> Aidge::Unfold_Op<DIM>::clone() const { +std::shared_ptr<Aidge::AbsOperator> Aidge::Unfold_Op<DIM>::clone() const { return std::make_shared<Unfold_Op>(*this); } diff --git a/src/scheduler/ProdConso.cpp b/src/scheduler/ProdConso.cpp index a3bff53c3..8b79094fe 100644 --- a/src/scheduler/ProdConso.cpp +++ b/src/scheduler/ProdConso.cpp @@ -17,7 +17,7 @@ #include "aidge/data/Tensor.hpp" #include "aidge/utils/ErrorHandling.hpp" -Aidge::ProdConso::ProdConso(const Operator& op, bool inPlace): +Aidge::ProdConso::ProdConso(const AbsOperator& op, bool inPlace): mOp(op), mInPlace(inPlace), mNbConsumedData(mOp.nbInputs(), Elts_t::NoneElts()), diff --git a/src/utils/Log.cpp b/src/utils/Log.cpp index da32a8e0e..f781735ba 100644 --- a/src/utils/Log.cpp +++ b/src/utils/Log.cpp @@ -62,7 +62,7 @@ std::vector<std::string> Aidge::Log::mContext; void Aidge::Log::log(Level level, const std::string& msg) { if (level >= mConsoleLevel) { // Apply log level style only for console. - // Styles that were already applied to msg with fmt are kept also in + // Styles that were already applied to msg with fmt are kept also in // the log file. const auto modifier = !mConsoleColor ? 
fmt::text_style() diff --git a/unit_tests/graphRegex/Test_examples.cpp b/unit_tests/graphRegex/Test_examples.cpp index d85ae5c89..a0fbbfa6e 100644 --- a/unit_tests/graphRegex/Test_examples.cpp +++ b/unit_tests/graphRegex/Test_examples.cpp @@ -52,4 +52,4 @@ TEST_CASE("Examples", "[GraphMatching]") { } } -} // namespace Aidge \ No newline at end of file +} // namespace Aidge \ No newline at end of file diff --git a/unit_tests/operator/Test_BitShift_Op.cpp b/unit_tests/operator/Test_BitShift_Op.cpp index 39916e4e7..85a456920 100644 --- a/unit_tests/operator/Test_BitShift_Op.cpp +++ b/unit_tests/operator/Test_BitShift_Op.cpp @@ -20,7 +20,7 @@ #include "aidge/operator/OperatorTensor.hpp" namespace Aidge { -TEST_CASE("[core/operator] BitShift_Op(forwardDims)", "[BitShift][forwardDims]") +TEST_CASE("[core/operator] BitShift_Op(forwardDims)", "[BitShift][forwardDims]") { constexpr std::uint16_t NBTRIALS = 10; @@ -101,7 +101,7 @@ TEST_CASE("[core/operator] BitShift_Op(forwardDims)", "[BitShift][forwardDims]") } } SECTION("BitShifOP Test dimensions [Wrong Dimensions]") { - + for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) { const std::size_t nb_dims = nbDimsDist(gen) + 1; std::vector<std::size_t> dims0(nb_dims); diff --git a/unit_tests/operator/Test_ConvDepthWise_Op.cpp b/unit_tests/operator/Test_ConvDepthWise_Op.cpp index 6008e3bfa..819217b31 100644 --- a/unit_tests/operator/Test_ConvDepthWise_Op.cpp +++ b/unit_tests/operator/Test_ConvDepthWise_Op.cpp @@ -70,4 +70,4 @@ TEST_CASE("[core/operator] ConvDepthWise_Op(computeReceptiveField)", "[Operator] REQUIRE(((res1[0].first == std::vector<DimSize_t>({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 1, 8, 8})))); } } -} // namespace Aidge \ No newline at end of file +} // namespace Aidge \ No newline at end of file diff --git a/unit_tests/operator/Test_Conv_Op.cpp b/unit_tests/operator/Test_Conv_Op.cpp index bc24fc808..813892531 100644 --- a/unit_tests/operator/Test_Conv_Op.cpp +++ 
b/unit_tests/operator/Test_Conv_Op.cpp @@ -83,4 +83,4 @@ TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeR // << res1[0].second[3] << "}" << std::endl; } } -} // namespace Aidge \ No newline at end of file +} // namespace Aidge \ No newline at end of file diff --git a/unit_tests/operator/Test_Operator.cpp b/unit_tests/operator/Test_Operator.cpp index a050bbc40..8ca53404c 100644 --- a/unit_tests/operator/Test_Operator.cpp +++ b/unit_tests/operator/Test_Operator.cpp @@ -23,7 +23,7 @@ #include "aidge/operator/Producer.hpp" namespace Aidge { -// TEST_CASE("[core/operator] Operator(computeReceptiveField)", "[Operator][computeReceptiveFiled]") { +// TEST_CASE("[core/operator] AbsOperator(computeReceptiveField)", "[AbsOperator][computeReceptiveFiled]") { // auto dataProvider1 = Producer({16, 3, 224, 224}, "dataProvider1"); // auto dataProvider2 = Producer({16, 3, 224, 224}, "dataProvider2"); // auto gen1 = Add(2); @@ -47,4 +47,4 @@ namespace Aidge { // REQUIRE(((res2[0].first == gen2->getOperator()->input(0).getIdx({3,2,100,28})) && (res2[0].second == std::vector<DimSize_t>({1, 1, 30, 40})))); // } // } -} // namespace Aidge \ No newline at end of file +} // namespace Aidge \ No newline at end of file diff --git a/unit_tests/recipes/Test_FuseToMetaOps.cpp b/unit_tests/recipes/Test_FuseToMetaOps.cpp index 9fceedf2f..80af0c375 100644 --- a/unit_tests/recipes/Test_FuseToMetaOps.cpp +++ b/unit_tests/recipes/Test_FuseToMetaOps.cpp @@ -41,4 +41,4 @@ TEST_CASE("[cpu/recipes] FuseToMetaOps", "[FuseToMetaOps][recipes]") { REQUIRE(nbFused == 2); } -} // namespace Aidge +} // namespace Aidge diff --git a/unit_tests/recipes/Test_MatMulToFC.cpp b/unit_tests/recipes/Test_MatMulToFC.cpp index 2adf882ca..ba691e10e 100644 --- a/unit_tests/recipes/Test_MatMulToFC.cpp +++ b/unit_tests/recipes/Test_MatMulToFC.cpp @@ -115,4 +115,4 @@ TEST_CASE("[cpu/recipes] MatMulToFC", "[MatMulToFC][recipes]") { } } -} // namespace Aidge +} // namespace Aidge -- GitLab