diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py index ad7ee666ebb56941cdc426220cd117a0e3f8b8d1..4aacfafd7d51830dc89b7b30ea5ebf521a13fe30 100644 --- a/aidge_core/unit_tests/test_impl.py +++ b/aidge_core/unit_tests/test_impl.py @@ -18,7 +18,7 @@ GLOBAL_CPT = 0 class testImpl(aidge_core.OperatorImpl): def __init__(self, op: aidge_core.Operator): - aidge_core.OperatorImpl.__init__(self, op) # Required to avoid type error ! + aidge_core.OperatorImpl.__init__(self, op, 'cpu') # Required to avoid type error ! def forward(self): global GLOBAL_CPT diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py index c541ae0e03459a0a7200795bc2d3c6b70c13be3b..c94960733b24444218b1209463adbda11b89f6e8 100644 --- a/aidge_core/unit_tests/test_operator_binding.py +++ b/aidge_core/unit_tests/test_operator_binding.py @@ -108,7 +108,7 @@ class test_operator_binding(unittest.TestCase): """Dummy implementation to test that C++ call python code """ def __init__(self, op: aidge_core.Operator): - aidge_core.OperatorImpl.__init__(self, op) # Recquired to avoid type error ! + aidge_core.OperatorImpl.__init__(self, op, 'test_impl') # Recquired to avoid type error ! 
self.idx = 0 def forward(self): diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp index aaca66f7953f0ade51d8a80c6ec8dae16d3e553d..931b1b26a04e8886c211d77f8b0147c2140d350a 100644 --- a/include/aidge/aidge.hpp +++ b/include/aidge/aidge.hpp @@ -23,12 +23,16 @@ #include "aidge/data/Tensor.hpp" #include "aidge/data/Database.hpp" #include "aidge/data/DataProvider.hpp" + #include "aidge/graph/Connector.hpp" #include "aidge/graph/GraphView.hpp" #include "aidge/graph/Node.hpp" #include "aidge/graph/OpArgs.hpp" #include "aidge/graphRegex/GraphRegex.hpp" + +#include "aidge/filler/Filler.hpp" + #include "aidge/nodeTester/ConditionalInterpreter.hpp" #include "aidge/operator/Add.hpp" @@ -65,7 +69,6 @@ #include "aidge/stimuli/Stimulus.hpp" #include "aidge/recipes/Recipes.hpp" -#include "aidge/filler/Filler.hpp" #include "aidge/utils/Attributes.hpp" #include "aidge/utils/StaticAttributes.hpp" diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp index 8b5aba10dbc2691b5d607cda28eba621335881d1..04044ed1c77915ec10b5af5b660cf8e6b20c81b2 100644 --- a/include/aidge/backend/OperatorImpl.hpp +++ b/include/aidge/backend/OperatorImpl.hpp @@ -9,12 +9,12 @@ * ********************************************************************************/ -#ifndef AIDGE_OPERATORIMPL_H_ -#define AIDGE_OPERATORIMPL_H_ +#ifndef AIDGE_BACKEND_OPERATORIMPL_H_ +#define AIDGE_BACKEND_OPERATORIMPL_H_ -#include <cstddef> +#include <string> #include <vector> -#include <memory> + #include "aidge/utils/Types.h" namespace Aidge { @@ -22,10 +22,13 @@ class Operator; class OperatorImpl { public: - OperatorImpl(const Operator& op); + OperatorImpl(const Operator& op, const std::string& backend); virtual void forward(); virtual void backward(); + const std::string& backend() const noexcept { + return mBackend; + } /** * @brief Minimum amount of data from a specific input required by the * implementation to be run. 
@@ -73,9 +76,10 @@ public: protected: const Operator &mOp; + const std::string mBackend; std::vector<NbElts_t> mNbConsumedData; std::vector<NbElts_t> mNbProducedData; }; } // namespace Aidge -#endif /* AIDGE_OPERATORIMPL_H_ */ +#endif /* AIDGE_BACKEND_OPERATORIMPL_H_ */ diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp index 538a6bb27c7af8b380dc1eaba6845bbf1ab42dbf..f3fa4ef5164a2eed7caaa7baa7f83e7ed00403b8 100644 --- a/include/aidge/backend/TensorImpl.hpp +++ b/include/aidge/backend/TensorImpl.hpp @@ -72,7 +72,7 @@ private: class TensorImpl { protected: - const char *mBackend; + const std::string mBackend; /// @brief Device id. const DeviceIdx_t mDevice; /// Number of elements (to be) stored. @@ -81,7 +81,7 @@ protected: public: TensorImpl() = delete; - TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims) + TensorImpl(const std::string& backend, DeviceIdx_t device, std::vector<DimSize_t> dims) : mBackend(backend), mDevice(device) { @@ -97,7 +97,7 @@ public: * Return the (backend, device) pair for this implementation. */ std::pair<std::string, DeviceIdx_t> device() const noexcept { - return std::make_pair(std::string(mBackend), mDevice); + return std::make_pair(mBackend, mDevice); } /** @@ -194,7 +194,7 @@ public: AIDGE_THROW_OR_ABORT(std::runtime_error, "Function not implented"); } - constexpr const char *backend() const { return mBackend; } + const std::string backend() const { return mBackend; } /** * @brief Copy from another backend. 
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp index 69ebb7bb916a2a6df1267339666b3277a8b5cbf1..922acacb070c745b2924d1fb787602326ec9d05a 100644 --- a/include/aidge/backend/cpu/data/TensorImpl.hpp +++ b/include/aidge/backend/cpu/data/TensorImpl.hpp @@ -14,7 +14,6 @@ #include "aidge/backend/TensorImpl.hpp" #include "aidge/data/Tensor.hpp" -#include "aidge/data/half.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" @@ -31,21 +30,12 @@ private: std::unique_ptr<T[]> mDataOwner; public: - static constexpr const char *Backend = "cpu"; + static const std::string Backend; +public: TensorImpl_cpu(DeviceIdx_t device, std::vector<DimSize_t> dims) : TensorImpl(Backend, device, dims) {} - bool operator==(const TensorImpl &otherImpl) const override final { - const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl); - AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts); - - std::size_t i = 0; - for (; i < mNbElts && - *static_cast<const T*>(rawPtr(i)) == *static_cast<const T*>(typedOtherImpl.rawPtr(i)); - ++i) { - } - return i == mNbElts; - } + bool operator==(const TensorImpl &other) const override final; static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, std::vector<DimSize_t> dims) { return std::make_shared<TensorImpl_cpu<T>>(device, dims); @@ -53,14 +43,7 @@ public: inline std::size_t scalarSize() const noexcept override final { return sizeof(T); } - void zeros() override final { - if (mData.empty()) { - lazyInit(); - } - for (std::size_t i = 0; i < mData.size(); ++i) { - *(mData.data() + i) = T(0); - } - } + void zeros() override final; void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final { const T* srcT = static_cast<const T *>(src); @@ -71,64 +54,7 @@ public: std::copy(srcT, srcT + length, dstT); } - void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t 
offset = 0) override final { - if (length == 0) { - return; - } - - T* dstT = static_cast<T *>(rawPtr(offset)); - AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity"); - switch (srcDt) - { - case DataType::Float64: - std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length, - dstT); - break; - case DataType::Float32: - std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length, - dstT); - break; - case DataType::Float16: - std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length, - dstT); - break; - case DataType::Int64: - std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length, - dstT); - break; - case DataType::UInt64: - std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length, - dstT); - break; - case DataType::Int32: - std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length, - dstT); - break; - case DataType::UInt32: - std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length, - dstT); - break; - case DataType::Int16: - std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length, - dstT); - break; - case DataType::UInt16: - std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length, - dstT); - break; - case DataType::Int8: - std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length, - dstT); - break; - case DataType::UInt8: - std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length, - dstT); - break; - default: - AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type."); - break; - } - } + void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final; void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t 
length, NbElts_t offset = 0) override final { AIDGE_ASSERT(device.first == Backend, "backend must match"); @@ -185,6 +111,10 @@ private: } }; + +template <typename T> +const std::string TensorImpl_cpu<T>::Backend = "cpu"; + namespace { static Registrar<Tensor> registrarTensorImpl_cpu_Float64( {"cpu", DataType::Float64}, Aidge::TensorImpl_cpu<double>::create); diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp index 1f9c5a5ec14cca4469b0329f2f968cf9dbc7b0de..2503c01b385a7a28eda0490a0715ef2de3a1f1db 100644 --- a/include/aidge/data/Tensor.hpp +++ b/include/aidge/data/Tensor.hpp @@ -24,6 +24,10 @@ #include "aidge/backend/TensorImpl.hpp" #include "aidge/data/Data.hpp" +#include "aidge/operator/Add.hpp" +#include "aidge/operator/Div.hpp" +#include "aidge/operator/Mul.hpp" +#include "aidge/operator/Sub.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ArrayHelpers.hpp" @@ -231,6 +235,102 @@ class Tensor : public Data, return *mImpl == *(otherTensor.mImpl); } + /** + * @brief Element-wise addition operation for two ``Tensor``s. + * @note ``Tensor``s should be stored on the same backend. + * @todo If input ``Tensor``s have a different dataType, the output should + * have the dataType of the ``Tensor`` with the highest precision. 
+ * + * @param other + * @return Tensor + */ + Tensor operator+(const Tensor& other) const { + AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); + AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); + AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend"); + auto add_ = Add_Op(2); + add_.associateInput(0, std::make_shared<Tensor>(*this)); + add_.associateInput(1, std::make_shared<Tensor>(other)); + add_.computeOutputDims(); + add_.setDataType(dataType()); + add_.setBackend(mImpl->backend()); + add_.forward(); + // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>; + return add_.getOutput(0)->clone(); + } + + /** + * @brief Element-wise substraction operation for two ``Tensor``s. + * @note ``Tensor``s should be stored on the same backend. + * @todo If input ``Tensor``s have a different dataType, the output should + * have the dataType of the ``Tensor`` with the highest precision. 
+ * + * @param other + * @return Tensor + */ + Tensor operator-(const Tensor& other) const { + AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); + AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); + AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend"); + auto sub_ = Sub_Op(); + sub_.associateInput(0, std::make_shared<Tensor>(*this)); + sub_.associateInput(1, std::make_shared<Tensor>(other)); + sub_.computeOutputDims(); + sub_.setDataType(dataType()); + sub_.setBackend(mImpl->backend()); + sub_.forward(); + // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>; + return sub_.getOutput(0)->clone(); + } + + /** + * @brief Element-wise multiplication operation for two ``Tensor``s. + * @note ``Tensor``s should be stored on the same backend. + * @todo If input ``Tensor``s have a different dataType, the output should + * have the dataType of the ``Tensor`` with the highest precision. 
+ * + * @param other + * @return Tensor + */ + Tensor operator*(const Tensor& other) const { + AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); + AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); + AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend"); + auto mul_ = Mul_Op(); + mul_.associateInput(0, std::make_shared<Tensor>(*this)); + mul_.associateInput(1, std::make_shared<Tensor>(other)); + mul_.computeOutputDims(); + mul_.setDataType(dataType()); + mul_.setBackend(mImpl->backend()); + mul_.forward(); + // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>; + return mul_.getOutput(0)->clone(); + } + + /** + * @brief Element-wise division operation for two ``Tensor``s. + * @note ``Tensor``s should be stored on the same backend. + * @todo If input ``Tensor``s have a different dataType, the output should + * have the dataType of the ``Tensor`` with the highest precision. 
+ * + * @param other + * @return Tensor + */ + Tensor operator/(const Tensor& other) const { + AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation."); + AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend"); + AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same backend"); + auto div_ = Div_Op(); + div_.associateInput(0, std::make_shared<Tensor>(*this)); + div_.associateInput(1, std::make_shared<Tensor>(other)); + div_.computeOutputDims(); + div_.setDataType(dataType()); + div_.setBackend(mImpl->backend()); + div_.forward(); + // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>; + return div_.getOutput(0)->clone(); + } + public: /** * @brief Perform a deep copy of the tensor. @@ -248,6 +348,10 @@ public: return newTensor; } + const std::string backend() const { + return hasImpl() ? getImpl()->backend() : ""; + } + /** * @brief Set the backend of the Tensor associated implementation. If there * was no previous implementation set, data will be allocated, but it will @@ -310,8 +414,8 @@ public: * @brief Get the Impl object * @return constexpr const std::shared_ptr<TensorImpl>& */ - constexpr const std::shared_ptr<TensorImpl> &getImpl() const { return mImpl; } - constexpr std::size_t getImplOffset() const { return mImplOffset; } + constexpr const std::shared_ptr<TensorImpl>& getImpl() const noexcept { return mImpl; } + constexpr std::size_t getImplOffset() const noexcept { return mImplOffset; } /** * @brief Set the Impl object @@ -461,6 +565,26 @@ public: return mGrad; } + /** + * @brief Associate the gradient with a Tensor instance and set its implementation + * if none was previously set. + * @note Dimensions for the Tensor instance are copied from the original current Tensor. 
+ * @note If a Tensor instance was already associated, only the implementation is created + * with values set to 0. + * @note If Tensor instance and implementation already existed for the gradient + * nothing is done. + */ + void initGradient() { + if (!mGrad) { + mGrad = std::make_shared<Tensor>(mDims); + } + if (!mGrad->hasImpl()) { + mGrad->setDataType(dataType()); + mGrad->setBackend(hasImpl() ? mImpl->backend() : "cpu"); + mGrad->zeros(); + } + } + /** * @brief From the the 1D contiguous index, return the coordinate of an element in the tensor. * Beware: do not use this function with the storage index! diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp index 788748c7953336118de20a6eba5db54d87b12f83..06f73c97fddce8b9979862351c5a39c985692eb9 100644 --- a/include/aidge/graph/GraphView.hpp +++ b/include/aidge/graph/GraphView.hpp @@ -62,11 +62,7 @@ public: return mNodes == gv.mNodes; } - NodePtr operator[](const std::string& nodeName) - { - AIDGE_ASSERT(mNodeRegistry.find(nodeName) != mNodeRegistry.end(), "No node named {} in graph {}.", nodeName, name()); - return mNodeRegistry.at(nodeName); - } + const NodePtr operator[](const std::string& nodeName) const; /////////////////////////////////////////////////////// // FUNCTIONAL DESCRIPTION @@ -82,14 +78,14 @@ public: * @brief Name of the node. * @return std::string */ - std::string name() const; + inline std::string name() const noexcept { return mName; } /** * @brief Set the node name. * @warning Undefined behaviour when several Nodes have the same name. * @param name New name for the node. */ - void setName(const std::string &name); + inline void setName(const std::string &name) { mName = name; } /** * @brief Save the GraphView as a Mermaid graph in a .md file at the @@ -105,11 +101,9 @@ public: * @param nodePtr Node to check * @return bool True is nodePtr belongs to the GraphView. 
*/ - inline bool inView(NodePtr nodePtr) const { - return mNodes.find(nodePtr) != mNodes.end(); - } + bool inView(const NodePtr& nodePtr) const; - NodePtr getRootNode() { + inline NodePtr rootNode() const noexcept { return mRootNode; } @@ -120,41 +114,32 @@ public: /////////////////////////////////////////////////////// public: /** @brief Get reference to the set of input Nodes. */ - inline std::set<NodePtr> inputNodes() const noexcept { - std::set<NodePtr> nodes; - for (auto node : mInputNodes) { - if (node.first != nullptr) { - nodes.insert(node.first); - } - } - return nodes; - } + std::set<NodePtr> inputNodes() const; + /** @brief Get reference to the set of output Nodes. */ - inline std::set<NodePtr> outputNodes() const noexcept { - std::set<NodePtr> nodes; - for (auto node : mOutputNodes) { - if (node.first != nullptr) { - nodes.insert(node.first); - } - } - return nodes; - } + std::set<NodePtr> outputNodes() const; + /** @brief Assess if the given Node is an input Node of the GraphView object. */ - inline bool isInputNode(NodePtr nodePtr) const { - const auto nodes = inputNodes(); - return (nodes.find(nodePtr) != nodes.end()) ? true : false; - } + bool isInputNode(const NodePtr& nodePtr) const; + /** @brief Assess if the given Node is an output Node of the GraphView object. */ - inline bool isOutputNode(NodePtr nodePtr) const { - const auto nodes = outputNodes(); - return (nodes.find(nodePtr) != nodes.end()) ? true : false; - } + bool isOutputNode(const NodePtr& nodePtr) const; void setOrderedInputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& inputs); void setOrderedOutputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& outputs); - inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedInputs() const { return mInputNodes; }; - inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedOutputs() const { return mOutputNodes; }; + /** + * @brief Get inputs of the current GraphView with their associated id. 
+ * The rank of the nodes are their rank in the vector. + * @return const std::vector<std::pair<NodePtr, IOIndex_t>>& + */ + inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedInputs() const noexcept { return mInputNodes; }; + /** + * @brief Get outputs of the current GraphView with their associated id. + * The rank of the nodes are their rank in the vector. + * @return const std::vector<std::pair<NodePtr, IOIndex_t>>& + */ + inline const std::vector<std::pair<NodePtr, IOIndex_t>>& getOrderedOutputs() const noexcept { return mOutputNodes; }; /** * @brief List outside data input connections of the GraphView. @@ -216,7 +201,7 @@ public: * If not, add a Transpose Operator. * 4 - Propagate Tensor dimensions through the consecutive Operators. */ - void compile(const std::string& backend, const Aidge::DataType datatype, DeviceIdx_t device = 0); + void compile(const std::string& backend = "cpu", const Aidge::DataType datatype = DataType::Float32, DeviceIdx_t device = 0); /** * @brief Compute dimensions of input/output Tensors for each Operator of the @@ -225,9 +210,9 @@ public: void forwardDims(const std::vector<std::vector<DimSize_t>> dims = {}); /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */ - void setBackend(const std::string &backend, DeviceIdx_t device = 0); + void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const; /** @brief Set the same backend for each Operator of the GraphView object's Nodes. 
*/ - void setDataType(const DataType &datatype); + void setDataType(const DataType& datatype) const; /////////////////////////////////////////////////////// // TOPOLOGY diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp index 3115cedca1f2a3bcc4a1330b96e90669bf7611a2..93cfb44514e39a489ccb75d86fd6e114da5c6162 100644 --- a/include/aidge/operator/Add.hpp +++ b/include/aidge/operator/Add.hpp @@ -12,15 +12,11 @@ #ifndef AIDGE_CORE_OPERATOR_ADD_H_ #define AIDGE_CORE_OPERATOR_ADD_H_ -#include <numeric> -#include <vector> -#include <cmath> #include <memory> +#include <string> #include <vector> -#include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" @@ -44,15 +40,7 @@ public: * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @param op Operator to copy. */ - Add_Op(const Add_Op& op) - : OperatorTensor(op) - { - if (op.mImpl){ - SET_IMPL_MACRO(Add_Op, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ - mImpl = nullptr; - } - } + Add_Op(const Add_Op& op); /** * @brief Clone the operator using its copy-constructor. 
@@ -74,10 +62,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Add_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName() { return {"data_input_0", "data_input_n"}; diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp index e427aac72ad3948d0d03f588c930cfccedfb1885..031046500e0c50443a0a1f4e98a6471625f25eb4 100644 --- a/include/aidge/operator/AvgPooling.hpp +++ b/include/aidge/operator/AvgPooling.hpp @@ -13,14 +13,18 @@ #define AIDGE_CORE_OPERATOR_AVGPOOLING_H_ #include <array> -#include <numeric> +#include <cmath> // std::floor +#include <cstddef> // std::size_t +#include <string> +#include <utility> // std::pair #include <vector> -#include <cmath> #include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" +#include "aidge/utils/ArrayHelpers.hpp" +#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" @@ -60,9 +64,9 @@ public: : OperatorTensor(op), Attributes_(op) { - if (op.mImpl){ - SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + if (op.mImpl) { + SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend()); + } else { mImpl = nullptr; } } @@ -101,8 +105,7 @@ public: std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, - const IOIndex_t outputIdx = 0) const override final - { + const IOIndex_t outputIdx = 0) const override final { if (outputIdx != 0) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output 
Tensor."); } @@ -153,8 +156,8 @@ public: } }; -template <DimIdx_t DIM> -const std::string AvgPooling_Op<DIM>::Type = "AvgPooling"; +template <Aidge::DimIdx_t DIM> +const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling"; template <std::array<DimSize_t, 1>::size_type DIM> inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims, diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp index 83ad2dbbb695e42c11cb794c7d5bd4578056d941..51673dd3c8b41c657c1df6e951a2cb3a842308b5 100644 --- a/include/aidge/operator/BatchNorm.hpp +++ b/include/aidge/operator/BatchNorm.hpp @@ -55,7 +55,7 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend()); }else{ mImpl = nullptr; } diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp index 7cc3985674219daf087381049d3a845299b3e250..bbc776a1175a1fc29d08c3872649a6b7aac2f04f 100644 --- a/include/aidge/operator/Cast.hpp +++ b/include/aidge/operator/Cast.hpp @@ -39,7 +39,11 @@ public: Cast_Op(const Cast_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? 
Registrar<Cast_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl) { + SET_IMPL_MACRO(Cast_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -50,12 +54,7 @@ public: return std::make_shared<Cast_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - if (Registrar<Cast_Op>::exists({name})) { - mImpl = Registrar<Cast_Op>::create({name})(*this); - } - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; void forward() override; diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp index 450c40bd210e0a4be891e436f03330a984e221be..611ff6bd53b1f16f87f73dd951d0645b9765262e 100644 --- a/include/aidge/operator/Concat.hpp +++ b/include/aidge/operator/Concat.hpp @@ -12,16 +12,16 @@ #ifndef AIDGE_CORE_OPERATOR_CONCAT_H_ #define AIDGE_CORE_OPERATOR_CONCAT_H_ -#include <numeric> -#include <vector> -#include <cmath> #include <memory> +#include <stdexcept> +#include <string> #include <vector> #include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Types.h" @@ -56,7 +56,7 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(Concat_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(Concat_Op, *this, op.backend()); }else{ mImpl = nullptr; } @@ -70,51 +70,9 @@ public: return std::make_shared<Concat_Op>(*this); } - // Data operator[](const char* inputName) override final { - // std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] : - // (strcmp(inputName, "weight") ? mInputs[1] : - // (strcmp(inputName, "bias") ? 
mInputs[2] : - // nullptr)); - // assert((in!=nullptr) && "No such parameter"); - // return *in; - // } - + void computeOutputDims() override final; - void computeOutputDims() override final { - // Every input is non-empty with the same number of dimensions - bool associated = (getInput(0) != nullptr); - associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input - auto outputDims = getInput(0)->dims(); - const auto firstInputNbDims = getInput(0) -> nbDims(); - for (IOIndex_t i = 1; i < nbInputs(); ++i) { - if (!getInput(i)) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i); - } - - if (getInput(i)->nbDims() == firstInputNbDims) { - for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) { - if (dim == getAttr<ConcatAttr::Axis>()) { - outputDims[dim] += getInput(i)->dims()[dim]; - } - else { - associated &= (getInput(i)->dims()[dim] == outputDims[dim]); - } - } - } - else { - associated = false; - break; - } - } - if (associated) { - getOutput(0)->resize(outputDims); - } - } - - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Concat_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input_0", "data_input_n"}; diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp index 517af5b050daa200e7d608aa71660c86b17701b0..c93a098106be76f30c1150ea64c464492429feb9 100644 --- a/include/aidge/operator/Conv.hpp +++ b/include/aidge/operator/Conv.hpp @@ -13,17 +13,20 @@ #define AIDGE_CORE_OPERATOR_CONV_H_ #include <array> -#include <cmath> -#include <cstddef> -#include <numeric> +#include <cmath> // std::floor +#include <cstddef> // std::size_t +#include <string> +#include <utility> // std::pair #include <vector> #include 
"aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" -#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/ArrayHelpers.hpp" +#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO +#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -77,9 +80,9 @@ public: : OperatorTensor(op), Attributes_(op) { - if (op.mImpl){ - SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + if (op.mImpl) { + SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend()); + } else { mImpl = nullptr; } } @@ -134,8 +137,10 @@ public: } } - -std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override { + std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> + computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, + const std::vector<DimSize_t>& outputDims, + const IOIndex_t outputIdx = 0) const override { if (outputIdx != 0) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor."); } @@ -191,6 +196,7 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> co AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); } + void setBackend(const std::string &name, DeviceIdx_t device = 0) override { SET_IMPL_MACRO(Conv_Op<DIM>, *this, name); mOutputs[0]->setBackend(name, device); diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp index 035bd84b647bc7b4c57daa14d20ebe60e59e83c2..559c0fc7a97a3a882f6720a91d02dee1af70abd8 100644 --- a/include/aidge/operator/ConvDepthWise.hpp +++ b/include/aidge/operator/ConvDepthWise.hpp @@ -13,14 +13,17 @@ 
#define AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ #include <array> -#include <cmath> -#include <numeric> +#include <cmath> // std::floor +#include <cstddef> // std::size_t +#include <string> +#include <utility> // std::pair #include <vector> #include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" +#include "aidge/utils/ArrayHelpers.hpp" #include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" @@ -72,7 +75,7 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend()); }else{ mImpl = nullptr; } diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp index be654a3c015e5810892c1e23f08cc1f4b83b2d93..49410db044518dc3ca2cc33285d570197d83b10a 100644 --- a/include/aidge/operator/Div.hpp +++ b/include/aidge/operator/Div.hpp @@ -12,14 +12,13 @@ #ifndef AIDGE_CORE_OPERATOR_DIV_H_ #define AIDGE_CORE_OPERATOR_DIV_H_ -#include <cassert> #include <memory> +#include <string> #include <vector> #include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/utils/Types.h" @@ -40,9 +39,9 @@ public: Div_Op(const Div_Op& op) : OperatorTensor(op) { - if (op.mImpl){ - SET_IMPL_MACRO(Div_Op, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + if (op.mImpl) { + SET_IMPL_MACRO(Div_Op, *this, op.backend()); + } else { mImpl = nullptr; } } @@ -57,11 +56,7 @@ public: void computeOutputDims() override final; - - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Div_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const 
std::vector<std::string> getInputsName(){ return {"data_input_1", "data_input_2"}; diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp index 5a92b5dc45b6a090be0d9306dbfc21b1c0ae6edb..5ec10522e889bb1188b2304940fd892c0928b414 100644 --- a/include/aidge/operator/Erf.hpp +++ b/include/aidge/operator/Erf.hpp @@ -12,16 +12,14 @@ #ifndef AIDGE_CORE_OPERATOR_ERF_H_ #define AIDGE_CORE_OPERATOR_ERF_H_ -#include <cassert> #include <memory> +#include <string> #include <vector> -#include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -40,9 +38,9 @@ public: Erf_Op(const Erf_Op& op) : OperatorTensor(op) { - if (op.mImpl){ - SET_IMPL_MACRO(Erf_Op, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + if (op.mImpl) { + SET_IMPL_MACRO(Erf_Op, *this, op.backend()); + } else { mImpl = nullptr; } } @@ -55,10 +53,7 @@ public: return std::make_shared<Erf_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Erf_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp index c111e38b00e69c8d0aecd9df0023f07a47a3865d..39b28c125c917f07c2cf238988e68075adeceb8e 100644 --- a/include/aidge/operator/FC.hpp +++ b/include/aidge/operator/FC.hpp @@ -13,13 +13,10 @@ #define AIDGE_CORE_OPERATOR_FC_H_ #include <array> -#include <cmath> -#include <numeric> #include <memory> #include <vector> #include "aidge/utils/Types.h" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include 
"aidge/operator/Producer.hpp" @@ -58,7 +55,7 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(FC_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(FC_Op, *this, op.backend()); }else{ mImpl = nullptr; } @@ -68,46 +65,15 @@ public: * @brief Clone the operator using its copy-constructor. * @see Operator::FC_Op */ - std::shared_ptr<Operator> clone() const override { + std::shared_ptr<Operator> clone() const override final { return std::make_shared<FC_Op>(*this); } - void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final { - assert(inputIdx < 3 && "operators supports only 3 inputs"); - assert(data->type() == Tensor::Type && "input data must be of Tensor type"); - // TODO: FIXME: check this, because data dims may not be initialized at this point... - //if (inputIdx == 2) { - // assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template getAttr<FCAttr::NoBias>()) == false ? static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>()) : 0)); - // assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1); - //} - mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data); - if (inputIdx == 0 && getInput(0)->nbDims() == 1) - mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()}); - } + void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final; - void computeOutputDims() override final { - bool associated = true; - for (IOIndex_t i = 0; i < nbInputs(); ++i) { - if (!getInput(i)) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i); - } - associated &= !(getInput(i)->empty()); - } - if (associated) { - // <batch, OutChannels> - mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()}); - } - } + void computeOutputDims() override final; - - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(FC_Op, *this, 
name); - mOutputs[0]->setBackend(name, device); - - // By default, automatically set backend for weight and bias inputs - getInput(1)->setBackend(name, device); - getInput(2)->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input", "weight", "bias"}; diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp index 142f6582a3afbc85ccd951fcfeff2a924a35e718..b7d18e6443404730bbcb73cf7e6da97b8b3e6a7c 100644 --- a/include/aidge/operator/Gather.hpp +++ b/include/aidge/operator/Gather.hpp @@ -12,16 +12,14 @@ #ifndef AIDGE_CORE_OPERATOR_GATHER_H_ #define AIDGE_CORE_OPERATOR_GATHER_H_ -#include <cassert> +#include <cstdint> // std::int64_t #include <memory> +#include <string> #include <vector> #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" -#include "aidge/operator/Producer.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Types.h" @@ -59,8 +57,8 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(Gather_Op, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + SET_IMPL_MACRO(Gather_Op, *this, op.backend()); + } else { mImpl = nullptr; } } @@ -75,10 +73,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Gather_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp index 20b0cdc4aa8a42043c37851ef110427a561e5e1d..e7d60285b4d45826f1d73635d54f4532b4fb1598 100644 --- 
a/include/aidge/operator/GenericOperator.hpp +++ b/include/aidge/operator/GenericOperator.hpp @@ -15,8 +15,6 @@ #include <memory> #include <vector> #include <string> -#include <cassert> -#include <cstring> #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -38,8 +36,8 @@ private: public: GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut) : OperatorTensor(type, nbData, nbParam, nbOut) - { - mImpl = std::make_shared<OperatorImpl>(*this); + { + mImpl = std::make_shared<OperatorImpl>(*this, ""); } /** @@ -49,9 +47,11 @@ public: GenericOperator_Op(const GenericOperator_Op& op) : OperatorTensor(op) { - mImpl = std::make_shared<OperatorImpl>(*this); + mImpl = std::make_shared<OperatorImpl>(*this, op.backend()); } + ~GenericOperator_Op() = default; + /** * @brief Clone the operator using its copy-constructor. * @see Operator::GenericOperator_Op @@ -60,50 +60,20 @@ public: return std::make_shared<GenericOperator_Op>(*this); } +public: + void computeOutputDims() override final; + + bool outputDimsForwarded() const override final; + + void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); } + void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); } + // Helper functions that can be used with setComputeOutputDims(): static const ComputeDimsFunc Identity; static const ComputeDimsFunc InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs); - inline void setComputeOutputDims(ComputeDimsFunc func) { mComputeOutputDims = func; } - - - void computeOutputDims() override final { - if (mComputeOutputDims) { - std::vector<std::vector<size_t>> inputsDims(nbInputs(), std::vector<size_t>()); - for (std::size_t i = 0; i < nbInputs(); ++i) { - if (getInput(i)) { - inputsDims[i] = getInput(i)->dims(); - } - } - - const auto& outputsDims = mComputeOutputDims(inputsDims); - 
assert(outputsDims.size() == nbOutputs() && "The provided ComputeDimsFunc function returns the wrong number of outputs"); - for (std::size_t i = 0; i < nbOutputs(); ++i) { - mOutputs[i]->resize(outputsDims[i]); - } - } - else { - assert(false && "Cannot compute output dim of a GenericOperator"); - } - } - - bool outputDimsForwarded() const override final { - if (mComputeOutputDims) { - return !(mOutputs[0]->empty()); - } - else { - assert(false && "GenericOperator cannot forward dims"); - return false; - } - } - - - ~GenericOperator_Op() = default; - - void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); } - void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); } }; /** diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp index c2e6eaff77971c3dcf350a02bc5089d08b5c8488..27432bc5bb251003e9e93261593e12c2fa704f3d 100644 --- a/include/aidge/operator/Identity.hpp +++ b/include/aidge/operator/Identity.hpp @@ -40,9 +40,9 @@ public: static const std::string Type; Identity_Op() - : OperatorTensor(Type, 1, 0, 1) + : OperatorTensor(Type, 1, 0, 1) { - mImpl = std::make_shared<OperatorImpl>(*this); + mImpl = std::make_shared<OperatorImpl>(*this, ""); } /** @@ -52,7 +52,7 @@ public: Identity_Op(const Identity_Op& op) : OperatorTensor(op) { - mImpl = std::make_shared<OperatorImpl>(*this); + mImpl = std::make_shared<OperatorImpl>(*this, op.backend()); } /** @@ -65,11 +65,16 @@ public: void computeOutputDims() override final {} // Do nothing + /** + * @brief Check if output dimensions have been computed. + * @note Since Indentity has no output Tensor, this function checks if its + * only input's dimensions have been computed. + * + * @return true Input has dimensions. + * @return false Input has no dimensions or is a nullptr. 
+ */ bool outputDimsForwarded() const override final { - if (mInputs[0]) - return !mInputs[0]->empty(); - else - return false; + return mInputs[0] ? !mInputs[0]->empty() : false; } diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp index c48b85b4a7af71fde0f8136732597e098c966839..83a7c30fce7e0f68576f367d4b0bfe48edf4b3b6 100644 --- a/include/aidge/operator/LeakyReLU.hpp +++ b/include/aidge/operator/LeakyReLU.hpp @@ -55,8 +55,8 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(LeakyReLU_Op, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend()); + } else { mImpl = nullptr; } } diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp index 596aa634693941d8e3a23ac955281cfd131e56ef..43bd8b1654206df15cd869cf2d37a216fcc4a733 100644 --- a/include/aidge/operator/MatMul.hpp +++ b/include/aidge/operator/MatMul.hpp @@ -17,7 +17,6 @@ #include <vector> #include "aidge/utils/Types.h" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Registrar.hpp" @@ -39,7 +38,11 @@ public: */ MatMul_Op(const MatMul_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? 
Registrar<MatMul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(MatMul_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -64,10 +67,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override final { - SET_IMPL_MACRO(MatMul_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName() { return {"data_input1", "data_input2"}; @@ -82,4 +82,4 @@ inline std::shared_ptr<Node> MatMul(const std::string& name = "") { } } // namespace Aidge -#endif /* AIDGE_CORE_OPERATOR__MATMUL_H_ */ +#endif /* AIDGE_CORE_OPERATOR_MATMUL_H_ */ diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp index 06ac30158f80a946b9310a93c8f81cc3ee975c84..5b09aa02cd0665172a9ae69549d8d9311e10d024 100644 --- a/include/aidge/operator/MaxPooling.hpp +++ b/include/aidge/operator/MaxPooling.hpp @@ -13,16 +13,20 @@ #define AIDGE_CORE_OPERATOR_MAXPOOLING_H_ #include <array> -#include <numeric> +#include <cmath> // std::ceil, std::floor +#include <cstddef> // std::size_t +#include <functional> +#include <memory> +#include <stdexcept> // std::runtime_error #include <vector> -#include <cmath> #include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" -#include "aidge/operator/Producer.hpp" -#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/ArrayHelpers.hpp" +#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/Registrar.hpp" +#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -64,9 +68,9 @@ public: : OperatorTensor(op), Attributes_(op) { - if (op.mImpl){ - SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + if (op.mImpl) { + 
SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend()); + } else { mImpl = nullptr; } } diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp index 8991ccb44eb4926f375ff102858f4683e1bea4d8..7de34563adcaabd63ab036232d4d7b6539fd11eb 100644 --- a/include/aidge/operator/Memorize.hpp +++ b/include/aidge/operator/Memorize.hpp @@ -12,17 +12,17 @@ #ifndef AIDGE_CORE_OPERATOR_MEMORIZE_H_ #define AIDGE_CORE_OPERATOR_MEMORIZE_H_ -#include <cassert> #include <memory> +#include <string> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" #include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" -#include "aidge/utils/Types.h" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" namespace Aidge { enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep }; @@ -47,14 +47,19 @@ public: } /** - * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). + * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), + * but not its input tensors (the new operator has no input associated). * @param op Operator to copy. */ Memorize_Op(const Memorize_Op& op) : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? 
Registrar<Memorize_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl) { + SET_IMPL_MACRO(Memorize_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } mOutputs[1] = mOutputs[0]; } @@ -66,10 +71,7 @@ public: return std::make_shared<Memorize_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Memorize_Op>::create({name})(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; void computeOutputDims() override; bool outputDimsForwarded() const override; @@ -98,4 +100,4 @@ const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = { }; } -#endif /* AIDGE_CORE_OPERATOR_MEMORIZE_H_ */ \ No newline at end of file +#endif /* AIDGE_CORE_OPERATOR_MEMORIZE_H_ */ diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp index 7f36eca2c4586f61f72e0d842d2d576450cd1596..4d719b6cb755bb2ddff96905f2e5b6bc24844e37 100644 --- a/include/aidge/operator/MetaOperator.hpp +++ b/include/aidge/operator/MetaOperator.hpp @@ -12,10 +12,18 @@ #ifndef AIDGE_CORE_OPERATOR_METAOPERATOR_H_ #define AIDGE_CORE_OPERATOR_METAOPERATOR_H_ -#include "aidge/operator/OperatorTensor.hpp" +#include <array> +#include <memory> +#include <string> + +#include "aidge/data/Data.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/graph/GraphView.hpp" #include "aidge/graph/OpArgs.hpp" +#include "aidge/operator/OperatorTensor.hpp" #include "aidge/scheduler/Scheduler.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" namespace Aidge { class MetaOperator_Op : public OperatorTensor, @@ -28,7 +36,7 @@ public: std::weak_ptr<Node> mUpperNode; public: - MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph); + MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph); /** * @brief Copy-constructor. 
Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp index 75304078829475b1488640dc39aeee8b64f1c3e5..cc9fba59431356a132330e453288f2f6e7141178 100644 --- a/include/aidge/operator/Mul.hpp +++ b/include/aidge/operator/Mul.hpp @@ -19,7 +19,6 @@ #include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/utils/Types.h" @@ -43,7 +42,11 @@ public: Mul_Op(const Mul_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? Registrar<Mul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl) { + SET_IMPL_MACRO(Mul_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -56,10 +59,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Mul_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input_1", "data_input_2"}; diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp index 396c60e46127ee9312745a92f9112dbc0742a584..17c8204c1fec4a54e8194bf2db1dc6e5a616fd23 100644 --- a/include/aidge/operator/Operator.hpp +++ b/include/aidge/operator/Operator.hpp @@ -81,7 +81,7 @@ public: virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0; /** - * @brief Set the specified input by performing a deep copy of the given data. + * @brief Set the specified input value by performing a deep copy of the given data. * The pointer itself is not changed, thus keeping the current connections. * @param inputIdx Index of the input to set. 
* @param data Data to copy. @@ -90,7 +90,7 @@ public: virtual void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) = 0; virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const = 0; /** - * @brief Set the specified output by performing a deep copy of the given data. + * @brief Set the specified output value by performing a deep copy of the given data. * The pointer itself is not changed, thus keeping the current connections. * @param inputIdx Index of the input to set. */ @@ -110,6 +110,9 @@ public: /////////////////////////////////////////////////////// // IMPLEMENTATION /////////////////////////////////////////////////////// + std::string backend() const noexcept { + return mImpl ? mImpl->backend() : ""; + } virtual void setBackend(const std::string& name, DeviceIdx_t device = 0) = 0; virtual void setDataType(const DataType& dataType) const = 0; diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp index 504a416488651d43126a60981cd8afe0f95821f2..adf45c2d8311112fa145097ee98f46d120bd41ff 100644 --- a/include/aidge/operator/OperatorTensor.hpp +++ b/include/aidge/operator/OperatorTensor.hpp @@ -17,12 +17,12 @@ #include <vector> #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/operator/Operator.hpp" #include "aidge/utils/Types.h" namespace Aidge { +class Tensor; class OperatorTensor : public Operator { /* TODO: Add an attribute specifying the type of Data used by the Operator. * The same way ``Type`` attribute specifies the type of Operator. 
Hence this @@ -41,26 +41,9 @@ public: OperatorTensor() = delete; OperatorTensor(const std::string& type, const IOIndex_t nbData, const IOIndex_t nbParam, - const IOIndex_t nbOut) - : Operator(type, nbData, nbParam, nbOut, OperatorType::Tensor), - mInputs(std::vector<std::shared_ptr<Tensor>>(nbData + nbParam, nullptr)), - mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) { - for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) { - mOutputs[i] = std::make_shared<Tensor>(); - mOutputs[i]->setDataType(DataType::Float32); - } - } + const IOIndex_t nbOut); - OperatorTensor(const OperatorTensor& other) - : Operator(other), - mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)), - mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) { - for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) { - mOutputs[i] = std::make_shared<Tensor>(); - // mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i))); - // datatype already copied - } - } + OperatorTensor(const OperatorTensor& other); ~OperatorTensor(); @@ -76,17 +59,13 @@ public: void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final; void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final; const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const; - inline std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final { - return std::static_pointer_cast<Data>(getInput(inputIdx)); - } + std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final; // output management void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override; void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) override; virtual const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const; - inline std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final { - return 
std::static_pointer_cast<Data>(getOutput(outputIdx)); - } + std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final; /////////////////////////////////////////////////// /////////////////////////////////////////////////// diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp index cb4ba871a55b9dfd1c835c05949c3c18966b7f5a..9109ccaeb8bc648fe74510216fad93299740b9bf 100644 --- a/include/aidge/operator/Pop.hpp +++ b/include/aidge/operator/Pop.hpp @@ -12,17 +12,16 @@ #ifndef AIDGE_CORE_OPERATOR_POP_H_ #define AIDGE_CORE_OPERATOR_POP_H_ -#include <cassert> #include <memory> +#include <string> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" -#include "aidge/utils/Types.h" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" namespace Aidge { enum class PopAttr { ForwardStep }; @@ -40,9 +39,7 @@ public: Pop_Op() : OperatorTensor(Type, 1, 0, 1), Attributes_(attr<PopAttr::ForwardStep>(0)) - { - - } + {} /** * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). @@ -52,7 +49,11 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? 
Registrar<Pop_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Pop_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -63,10 +64,7 @@ public: return std::make_shared<Pop_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Pop_Op>::create({name})(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; void computeOutputDims() override final; void updateConsummerProducer() override; diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp index ec4eebf9ddba475310ba292dd5923ba50933545d..f2becdc60ceb44c19e341496f71e09f061cea55f 100644 --- a/include/aidge/operator/Pow.hpp +++ b/include/aidge/operator/Pow.hpp @@ -12,15 +12,13 @@ #ifndef AIDGE_CORE_OPERATOR_POW_H_ #define AIDGE_CORE_OPERATOR_POW_H_ -#include <cassert> #include <memory> +#include <string> #include <vector> #include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" #include "aidge/utils/Types.h" @@ -41,7 +39,7 @@ public: : OperatorTensor(op) { if (op.mImpl){ - SET_IMPL_MACRO(Pow_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(Pow_Op, *this, op.backend()); }else{ mImpl = nullptr; } @@ -58,15 +56,12 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Pow_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; - static const std::vector<std::string> getInputsName(){ + static const std::vector<std::string> getInputsName() { return {"data_input_1", "data_input_2"}; } - static const std::vector<std::string> getOutputsName(){ + 
static const std::vector<std::string> getOutputsName() { return {"data_output"}; } }; diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp index c9b1f6e4aa5d82006d4bed880151ac1a22a4882b..66c66d90b4ed465d31ed20dd41245fed7a71d58e 100644 --- a/include/aidge/operator/Producer.hpp +++ b/include/aidge/operator/Producer.hpp @@ -12,7 +12,9 @@ #ifndef AIDGE_CORE_OPERATOR_PRODUCER_H_ #define AIDGE_CORE_OPERATOR_PRODUCER_H_ +#include <cstddef> #include <array> +#include <memory> #include <vector> #include "aidge/utils/Types.h" @@ -42,41 +44,40 @@ public: Producer_Op(const std::array<DimSize_t, DIM>& dims, bool constant = false) : OperatorTensor(Type, 0, 0, 1), - Attributes_(attr<ProdAttr::Constant>(constant)) + Attributes_(attr<ProdAttr::Constant>(constant)) { mOutputs[0]->resize(dims); - mImpl = std::make_shared<OperatorImpl>(*this); + // mImpl = std::make_shared<OperatorImpl>(*this, ""); + mImpl = nullptr; } - Producer_Op(const std::shared_ptr<Tensor> tensor, bool constant = false) - : OperatorTensor(Type, 0, 0, 1), - Attributes_(attr<ProdAttr::Constant>(constant)) - { - mOutputs[0] = tensor; // copy the pointer of the Tensor - mImpl = std::make_shared<OperatorImpl>(*this); - } + /** + * @brief Construct a new Producer_Op object from a Tensor. + * + * @param tensor Tensor to set in the Producer. + * @param constant Whether the Producer should be considered constant. + */ + Producer_Op(const std::shared_ptr<Tensor> tensor, bool constant = false); /** - * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). + * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), + * but not its input tensors (the new operator has no input associated). * @param op OperatorTensor to copy.
*/ - Producer_Op(const Producer_Op& op) - : OperatorTensor(op), - Attributes_(op) - { - for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) { - mOutputs[i] = std::make_shared<Tensor>(*(op.getOutput(i))); - } - if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){ - SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend()); - }else{ - mImpl = std::make_shared<OperatorImpl>(*this); - } - } + Producer_Op(const Producer_Op& op); + +public: + /** + * @brief Conversion operator from Producer to Tensor. + * + * @return std::shared_ptr<Tensor> + */ + operator std::shared_ptr<Tensor>() const { return mOutputs[0]; } +public: /** * @brief Clone the operator using its copy-constructor. - * @see Operator::Producer_Op + * @see Operator::Producer_Op(const Producer_Op&) */ std::shared_ptr<Operator> clone() const override { return std::make_shared<Producer_Op>(*this); @@ -86,17 +87,14 @@ public: AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input."); } - void computeOutputDims() override final {} + void computeOutputDims() noexcept override final {} - bool outputDimsForwarded() const override final {return true;} + inline bool outputDimsForwarded() const noexcept override final { return true; } inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Producer_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {}; @@ -105,7 +103,6 @@ public: return {"data_output"}; } -public: void forward() override final { fmt::print("Basic Producer forward() function.\n"); } diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp index 
5b8f5c4b819f9a2f8cf518bdc50c445fbce38102..963de31c49f48784e92434b2b563d6c008e2d4fd 100644 --- a/include/aidge/operator/ReLU.hpp +++ b/include/aidge/operator/ReLU.hpp @@ -16,11 +16,11 @@ #include <memory> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -40,7 +40,7 @@ public: : OperatorTensor(op) { if (op.mImpl){ - SET_IMPL_MACRO(ReLU_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(ReLU_Op, *this, op.backend()); }else{ mImpl = nullptr; } @@ -55,10 +55,7 @@ public: } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(ReLU_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp index 09f1d58359b265af58fd78ef8de54dd1944b5cf1..ab27e4e0233052f7cc155ed0375175a27d3edcf5 100644 --- a/include/aidge/operator/ReduceMean.hpp +++ b/include/aidge/operator/ReduceMean.hpp @@ -12,17 +12,15 @@ #ifndef AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ #define AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ -#include <algorithm> // std::for_each -#include <array> -#include <cmath> #include <cstdint> // std::int32_t -#include <numeric> +#include <memory> +#include <string> #include <vector> -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" +#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Registrar.hpp" #include 
"aidge/utils/Types.h" @@ -30,21 +28,20 @@ namespace Aidge { enum class ReduceMeanAttr { Axes, KeepDims }; -template <DimIdx_t DIM> class ReduceMean_Op : public OperatorTensor, - public Registrable<ReduceMean_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>, - public StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t> { + public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>, + public StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t> { public: static const std::string Type; ReduceMean_Op() = delete; - using Attributes_ = StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t>; + using Attributes_ = StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t>; template <ReduceMeanAttr e> using attr = typename Attributes_::template attr<e>; - constexpr ReduceMean_Op(const std::array<std::int32_t, DIM> &axes, DimSize_t keep_dims) + ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims) : OperatorTensor(Type, 1, 0, 1), Attributes_(attr<ReduceMeanAttr::Axes>(axes), attr<ReduceMeanAttr::KeepDims>(keep_dims)) {} @@ -53,13 +50,13 @@ class ReduceMean_Op : public OperatorTensor, * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @param op Operator to copy. 
*/ - ReduceMean_Op(const ReduceMean_Op<DIM>& op) + ReduceMean_Op(const ReduceMean_Op& op) : OperatorTensor(op), Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend()); + } else { mImpl = nullptr; } } @@ -69,74 +66,51 @@ class ReduceMean_Op : public OperatorTensor, * @see Operator::ReduceMean_Op */ std::shared_ptr<Operator> clone() const override { - return std::make_shared<ReduceMean_Op<DIM>>(*this); + return std::make_shared<ReduceMean_Op>(*this); } - void computeOutputDims() override final { - if (!getInput(0)) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor"); - } - if (!getInput(0)->empty()) { - // make Axes attribute positive - std::array<std::int32_t, DIM>& axes = this->template getAttr<ReduceMeanAttr::Axes>(); - std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) { - if (val < 0) - val+=static_cast<std::int32_t>(getInput(0)->nbDims()); - }); - std::sort(axes.begin(), axes.end()); - - // build output dimensions - std::vector<DimSize_t> outDims = getInput(0)->dims(); - if (this->template getAttr<ReduceMeanAttr::KeepDims>()) { - std::for_each(axes.begin(), axes.end(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; }); - } - else { - for (auto it = axes.crbegin(); it != axes.crend(); ++it) - outDims.erase(outDims.begin() + static_cast<std::size_t>(*it)); - } - - if(outDims.size()>0) - mOutputs[0]->resize(outDims); - else - mOutputs[0]->resize({1}); - } - } + void computeOutputDims() override final; - void setBackend(const std::string &name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string &name, DeviceIdx_t device = 0) override final; - static const std::vector<std::string> getInputsName(){ + static const std::vector<std::string> getInputsName() { return 
{"data_input"}; } - static const std::vector<std::string> getOutputsName(){ + static const std::vector<std::string> getOutputsName() { return {"data_output"}; } }; -template <std::array<DimSize_t, 1>::size_type DIM> -inline std::shared_ptr<Node> ReduceMean(const std::array<std::int32_t, DIM> &axes, +/** + * @brief Compute the mean value of a Tensor over the provided axes. Dimensions + * may be reduced by erasing the provided axes or not. + * + * @param axes Dimensions over which data mean should be computed. + * @param keep_dims Whether or not reduced dimensions are to be erased. + * @param name Name of the Operator. + * @return std::shared_ptr<Node> Node containing the Operator. + */ +inline std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes, DimSize_t keep_dims=1, const std::string& name = "") { // FIXME: properly handle default w&b initialization in every cases - static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported"); - return std::make_shared<Node>(std::make_shared<ReduceMean_Op<static_cast<DimIdx_t>(DIM)>>(axes, keep_dims), name); + AIDGE_ASSERT(axes.size()<=MaxDim, "Too many kernel dimensions required by ReduceMean, not supported"); + return std::make_shared<Node>(std::make_shared<ReduceMean_Op>(axes, keep_dims), name); } // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction -template <DimSize_t DIM> -inline std::shared_ptr<Node> ReduceMean( - std::int32_t const (&axes)[DIM], - DimSize_t keep_dims = 1, - const std::string& name = "") { - static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported"); - return ReduceMean(to_array(axes), keep_dims, name); -} - -template <DimIdx_t DIM> -const std::string ReduceMean_Op<DIM>::Type = "ReduceMean"; +// template <DimSize_t DIM> +// inline std::shared_ptr<Node> ReduceMean( +// std::int32_t const (&axes)[DIM], +// DimSize_t keep_dims = 1, +// const std::string& name = "") { 
+// static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported"); +// return ReduceMean(to_array(axes), keep_dims, name); +// } + +// template <DimIdx_t DIM> +// const std::string ReduceMean_Op::Type = "ReduceMean"; } // namespace Aidge diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp index 8914bbc9a9f3748276ead32aba8cb023ba14b1b7..060029bb87ea142728056b3817b8162d566cb458 100644 --- a/include/aidge/operator/Reshape.hpp +++ b/include/aidge/operator/Reshape.hpp @@ -12,7 +12,6 @@ #ifndef AIDGE_CORE_OPERATOR_RESHAPE_H_ #define AIDGE_CORE_OPERATOR_RESHAPE_H_ -#include <cassert> #include <memory> #include <vector> @@ -54,8 +53,8 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(Reshape_Op, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + SET_IMPL_MACRO(Reshape_Op, *this, op.backend()); + } else { mImpl = nullptr; } } @@ -70,10 +69,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Reshape_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp index 29ce0527a9b8b15c7b45c0b0241a83957abb5565..8f54ab217631ac69a4e16555f8e58f550ab0156c 100644 --- a/include/aidge/operator/Scaling.hpp +++ b/include/aidge/operator/Scaling.hpp @@ -9,18 +9,17 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_Scaling_H__ -#define __AIDGE_CORE_OPERATOR_Scaling_H__ +#ifndef AIDGE_CORE_OPERATOR_SCALING_H_ +#define AIDGE_CORE_OPERATOR_SCALING_H_ #include <vector> #include <memory> -#include "aidge/utils/StaticAttributes.hpp" -#include "aidge/utils/Registrar.hpp" -#include 
"aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -56,7 +55,7 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(Scaling_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(Scaling_Op, *this, op.backend()); } else { mImpl = nullptr; } @@ -70,10 +69,7 @@ public: return std::make_shared<Scaling_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Scaling_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName() { return {"data_input"}; @@ -99,4 +95,4 @@ const char* const EnumStrings<Aidge::ScalingAttr>::data[] = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"}; } -#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */ +#endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */ diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp index ab97bf3211edb53d65a90d16dba5d0c66dfa33da..bea9fc45eaa7f17f71963106b5bd3e1340a48a92 100644 --- a/include/aidge/operator/Sigmoid.hpp +++ b/include/aidge/operator/Sigmoid.hpp @@ -39,7 +39,11 @@ public: Sigmoid_Op(const Sigmoid_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? 
Registrar<Sigmoid_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Sigmoid_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -51,10 +55,7 @@ public: } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Sigmoid_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp index 363c3c2b4ec397fdd62dc3260b63a0cd6d6c0081..f68aa17f480038d8ff7850577c438cfdc6704d59 100644 --- a/include/aidge/operator/Slice.hpp +++ b/include/aidge/operator/Slice.hpp @@ -56,7 +56,7 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(Slice_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(Slice_Op, *this, op.backend()); }else{ mImpl = nullptr; } diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp index 943f69a588ebfedf28ec5ebb3a782e7510fa710a..d48dbc2b60e46eb5c074b8adae065383e29b1769 100644 --- a/include/aidge/operator/Softmax.hpp +++ b/include/aidge/operator/Softmax.hpp @@ -12,14 +12,10 @@ #ifndef AIDGE_CORE_OPERATOR_SOFTMAX_H_ #define AIDGE_CORE_OPERATOR_SOFTMAX_H_ -#include <cassert> #include <memory> #include <vector> - #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" @@ -56,7 +52,7 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(Softmax_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(Softmax_Op, *this, op.backend()); }else{ mImpl = nullptr; } @@ -70,10 +66,7 @@ public: return std::make_shared<Softmax_Op>(*this); } - void setBackend(const std::string& name, 
DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Softmax_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp index dd3fa541b9fd5177ddd3b9e8bcd781c0ea3a1867..f5ffa431192d73a703c1ce973cb485dadb31420d 100644 --- a/include/aidge/operator/Sqrt.hpp +++ b/include/aidge/operator/Sqrt.hpp @@ -12,16 +12,13 @@ #ifndef AIDGE_CORE_OPERATOR_SQRT_H_ #define AIDGE_CORE_OPERATOR_SQRT_H_ -#include <cassert> #include <memory> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -46,7 +43,7 @@ public: : OperatorTensor(op) { if (op.mImpl){ - SET_IMPL_MACRO(Sqrt_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(Sqrt_Op, *this, op.backend()); }else{ mImpl = nullptr; } @@ -60,10 +57,7 @@ public: return std::make_shared<Sqrt_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Sqrt_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp index 5683a9be5ea2278d92fe7da081f0c4a80ff9500d..fbcebcc9f62c23e9c60b5dff6f0d41c10d8b8717 100644 --- a/include/aidge/operator/Sub.hpp +++ b/include/aidge/operator/Sub.hpp @@ -12,16 +12,13 @@ #ifndef AIDGE_CORE_OPERATOR_SUB_H_ #define AIDGE_CORE_OPERATOR_SUB_H_ -#include 
<cassert> #include <memory> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -46,8 +43,8 @@ public: : OperatorTensor(op) { if (op.mImpl){ - SET_IMPL_MACRO(Sub_Op, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + SET_IMPL_MACRO(Sub_Op, *this, op.backend()); + } else { mImpl = nullptr; } } @@ -63,10 +60,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Sub_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input_1", "data_input_2"}; diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp index ce0dc12a06d242d215c07dc6593bb7e2cb2c3c8a..3fd5377d30cfff864743dcab2da9e690e26e5263 100644 --- a/include/aidge/operator/Tanh.hpp +++ b/include/aidge/operator/Tanh.hpp @@ -12,15 +12,13 @@ #ifndef AIDGE_CORE_OPERATOR_TANH_H_ #define AIDGE_CORE_OPERATOR_TANH_H_ -#include <cassert> #include <memory> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -39,7 +37,11 @@ public: Tanh_Op(const Tanh_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? 
Registrar<Tanh_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Tanh_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -51,10 +53,7 @@ public: } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Tanh_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp index b040fc907dd5ac1f40a8a1885d27364785ba9188..1beb5781b9262669cd2acb6ce4ef3aae85843573 100644 --- a/include/aidge/operator/Transpose.hpp +++ b/include/aidge/operator/Transpose.hpp @@ -57,7 +57,7 @@ class Transpose_Op : public OperatorTensor, Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(Transpose_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(Transpose_Op<DIM>, *this, op.backend()); }else{ mImpl = nullptr; } diff --git a/include/aidge/recipes/GraphViewHelper.hpp b/include/aidge/recipes/GraphViewHelper.hpp index c6204cdffa5e580190b8cd3f1817788a12e00bc3..a2c571bf4ed164729f7c3416c814b913b4d07e6f 100644 --- a/include/aidge/recipes/GraphViewHelper.hpp +++ b/include/aidge/recipes/GraphViewHelper.hpp @@ -9,14 +9,14 @@ * ********************************************************************************/ -#ifndef AIDGE_CORE_UTILS_RECIPES_H_ -#define AIDGE_CORE_UTILS_RECIPES_H_ +#ifndef AIDGE_CORE_UTILS_GRAPHVIEWHELPER_H_ +#define AIDGE_CORE_UTILS_GRAPHVIEWHELPER_H_ #include <memory> #include <set> -#include "aidge/graph/Node.hpp" #include "aidge/graph/GraphView.hpp" +#include "aidge/data/Tensor.hpp" namespace Aidge { @@ -26,15 +26,21 @@ namespace Aidge { * @param graphview GraphView instance where Producers should be searched. 
* @return std::set<std::shared_ptr<Node>> */ -std::set<std::shared_ptr<Aidge::Node>> producers(std::shared_ptr<Aidge::GraphView> graphview) { - std::set<std::shared_ptr<Node>> res; - const std::set<std::shared_ptr<Node>> nodes = graphview->getNodes(); - - std::copy_if(nodes.cbegin(), - nodes.cend(), - std::inserter(res, res.begin()), - [](std::shared_ptr<Node> n){ return n->type() == "Producer"; }); - - return res; -} -} // namespace Aidge \ No newline at end of file +std::set<std::shared_ptr<Tensor>> producers(std::shared_ptr<GraphView> graphview); + + +// TODO: change for every Tensor of Operator Producer not constant +/** + * @brief Getter for every ``Tensor`` owned by an ``Operator`` inside the provided ``GraphView``. + * @note An ``Operator`` owns its output ``Tensor``s. + * + * @param graphview Pointer to the ``GraphView`` from which ``Tensor``s should be extracted. + * @return std::set<std::shared_ptr<Tensor>> Set of pointers to the ``Tensor``s. + */ +std::set<std::shared_ptr<Tensor>> parameters(std::shared_ptr<GraphView> graphview); + +void compile_gradient(std::shared_ptr<Aidge::GraphView> gv); + +} // namespace Aidge + +#endif /* AIDGE_CORE_UTILS_GRAPHVIEWHELPER_H_ */ diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp index c737680bf3d9227161eed250c2cb52a443c37ab3..b25ebd3c8de3830174c11d93d6eb60c8703c6a0d 100644 --- a/include/aidge/scheduler/Scheduler.hpp +++ b/include/aidge/scheduler/Scheduler.hpp @@ -69,7 +69,7 @@ public: /** * @brief Place the data tensors inside in the data input tensor of the graphView. In case of multiple data input tensors, they are mapped to producers in the order given by the graph. 
- * + * @param data data input tensors */ void connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data); @@ -79,6 +79,11 @@ public: */ void forward(bool forwardDims = true, bool verbose = false, std::vector<std::shared_ptr<Aidge::Tensor>> data = {}); + /** + * @brief Run a backward pass on the provided Computational Graph with a batch of data + */ + void backward(std::vector<std::shared_ptr<Aidge::Tensor>> data, bool instantiateGrad = true, bool verbose = false); + /** + * @brief Save in a Markdown file the order of layers execution. + * @param fileName Name of the generated file. diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp index 567270d63c092aef6411a4438f59b7770ee3d5bf..e116fa91cac4d3828e998c6a06825afb118ac52c 100644 --- a/include/aidge/utils/Registrar.hpp +++ b/include/aidge/utils/Registrar.hpp @@ -23,7 +23,7 @@ #include <functional> #include <map> -#include <cassert> +#include <vector> namespace Aidge { #ifdef PYBIND @@ -72,19 +72,18 @@ struct Registrar { } static bool exists(const registrar_key& key) { - const auto it = C::registry().find(key); - return (it != C::registry().end()); + return (C::registry().find(key) != C::registry().cend()); } static auto create(const registrar_key& key){ const auto it = C::registry().find(key); - AIDGE_ASSERT(it != C::registry().end(), "missing or invalid registrar key: {}\nDid you include/import the corresponding module?", key); + AIDGE_ASSERT(it != C::registry().cend(), "missing or invalid registrar key: {}\nDid you include/import the corresponding module?", key); return (*it).second; } static std::vector<registrar_key> getKeys(){ std::vector<registrar_key> keys; - for(auto keyValue : C::registry()) + for(const auto& keyValue : C::registry()) keys.push_back(keyValue.first); return keys; } diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp index 91d65484a122d6a651758e16eb0e925b6e0bfdd0..97cf817176c733000eda8da6c6a213ccc22f1dc4 100644 ---
a/python_binding/backend/pybind_OperatorImpl.cpp +++ b/python_binding/backend/pybind_OperatorImpl.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> #include <pybind11/stl.h> +#include <string> #include "aidge/operator/Operator.hpp" #include "aidge/backend/OperatorImpl.hpp" @@ -116,7 +117,7 @@ public: void init_OperatorImpl(py::module& m){ py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr()) - .def(py::init<const Operator&>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>()) + .def(py::init<const Operator&, const std::string&>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>()) .def("forward", &OperatorImpl::forward) .def("backward", &OperatorImpl::backward) .def("get_nb_required_data", &OperatorImpl::getNbRequiredData) diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp index 93389edf663a6154daf0b9ef2a7cc4095abc4d0f..b97af94ad583cf42e25fa3afc0697021f6dcadcc 100644 --- a/python_binding/data/pybind_Tensor.cpp +++ b/python_binding/data/pybind_Tensor.cpp @@ -76,6 +76,7 @@ void init_Tensor(py::module& m){ .def("set_datatype", &Tensor::setDataType, py::arg("datatype"), py::arg("copyCast") = true) .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true) .def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims) + .def("grad", &Tensor::grad) .def("dtype", &Tensor::dataType) .def("size", &Tensor::size) .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize) diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp index eae05d8e2c04a877e5942600d7120024f20c4788..f06a70f32999d942f6d060ba9b6df6360438c60d 100644 --- a/python_binding/graph/pybind_GraphView.cpp +++ b/python_binding/graph/pybind_GraphView.cpp @@ -31,6 +31,8 @@ void init_GraphView(py::module& m) { :type path: str )mydelimiter") 
.def("log_outputs", &GraphView::logOutputs, py::arg("path")) + .def("get_ordered_inputs", &GraphView::getOrderedInputs) + .def("get_ordered_outputs", &GraphView::getOrderedOutputs) .def("get_output_nodes", &GraphView::outputNodes, R"mydelimiter( Get set of output Nodes. diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp index 661c96bb835fa3ac719ab10dbf83e4137f1bb248..c3eeb192a88163be96f973a55e6ef7cc60ec48af 100644 --- a/python_binding/operator/pybind_Add.cpp +++ b/python_binding/operator/pybind_Add.cpp @@ -12,6 +12,7 @@ #include <pybind11/pybind11.h> #include "aidge/operator/Add.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp index c44c7b49ade1e47438f80f0b3f3a83c18eb4e0fa..ab52472b4576d4ab4adf05d3fed139ae40c75919 100644 --- a/python_binding/operator/pybind_AvgPooling.cpp +++ b/python_binding/operator/pybind_AvgPooling.cpp @@ -17,6 +17,7 @@ #include <array> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/AvgPooling.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp index 087c232dc6a2977169e19ce4bdf0807adfc13d93..9640141e03bcd811f5ce24c544c5cdbc9fe6b2f3 100644 --- a/python_binding/operator/pybind_BatchNorm.cpp +++ b/python_binding/operator/pybind_BatchNorm.cpp @@ -12,6 +12,7 @@ #include <pybind11/pybind11.h> #include <string> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/BatchNorm.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp index 
38d8a20cba1eafc255b1da313d35ad8be116620d..756686c209c33fe03f7bda4bbb53d8c3c71e8b4c 100644 --- a/python_binding/operator/pybind_Concat.cpp +++ b/python_binding/operator/pybind_Concat.cpp @@ -12,6 +12,7 @@ #include <pybind11/pybind11.h> #include <string> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Concat.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp index d1016869c3fec9cbc10f2d2c86f685f8787b1d3b..adb0e108c409032c7e132016f5b92ed9f9233491 100644 --- a/python_binding/operator/pybind_Conv.cpp +++ b/python_binding/operator/pybind_Conv.cpp @@ -16,6 +16,7 @@ #include <array> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Conv.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp index bbb94c3773e825cd5ee852243fa8db7a5bd763da..19b3332a84037185afdc87fd90cb9c8fea2e64f8 100644 --- a/python_binding/operator/pybind_ConvDepthWise.cpp +++ b/python_binding/operator/pybind_ConvDepthWise.cpp @@ -17,6 +17,7 @@ #include <array> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/ConvDepthWise.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp index 2996e0bcae6d69d9ad2ef0d4d8eee8489cd8cdc8..e9bf26b629aa05090c9601103676cbc12ff4c88d 100644 --- a/python_binding/operator/pybind_Div.cpp +++ b/python_binding/operator/pybind_Div.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Div.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp index 
e1aef08ad597d92c4cf4b6d5a2cff487e438538e..c5fd53f2a665b5b816a3778e6f874cd04956e99e 100644 --- a/python_binding/operator/pybind_Erf.cpp +++ b/python_binding/operator/pybind_Erf.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Erf.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp index 0b13643cbd3ebb265dab62a1030729fca62dea62..ab1ed9ce20bec01e205cd6478c6a93df9f91a2fb 100644 --- a/python_binding/operator/pybind_FC.cpp +++ b/python_binding/operator/pybind_FC.cpp @@ -11,8 +11,9 @@ #include <pybind11/pybind11.h> -#include "aidge/operator/FC.hpp" #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/FC.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp index db6bdb15a2e6288b5f775d538a5e14f15d79d2c1..8c32acfe2bd7e0118c186be8fa1297ee16fe6f6c 100644 --- a/python_binding/operator/pybind_Gather.cpp +++ b/python_binding/operator/pybind_Gather.cpp @@ -12,6 +12,7 @@ #include <pybind11/pybind11.h> #include <string> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Gather.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp index a5435a3ce67ffe0f75b8bbda19e3d552baeef5ef..31ee946fc99df40133ff04965c762f9ddae0d131 100644 --- a/python_binding/operator/pybind_GenericOperator.cpp +++ b/python_binding/operator/pybind_GenericOperator.cpp @@ -15,6 +15,7 @@ #include <stdio.h> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/GenericOperator.hpp" #include "aidge/operator/OperatorTensor.hpp" namespace py = pybind11; diff --git a/python_binding/operator/pybind_Identity.cpp 
b/python_binding/operator/pybind_Identity.cpp index b1b1e8888976c578ff490f35776c890ba59911dc..4538b72fcb012a35ca0ebf3a15449a4b5cfff7a8 100644 --- a/python_binding/operator/pybind_Identity.cpp +++ b/python_binding/operator/pybind_Identity.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Identity.hpp" #include "aidge/operator/Operator.hpp" diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp index 66b2c34a9a558d20d90f71dd590d9fe8c370c10d..9ad47e7a391698ae9b30d35d94f05e8b80138590 100644 --- a/python_binding/operator/pybind_LeakyReLU.cpp +++ b/python_binding/operator/pybind_LeakyReLU.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/LeakyReLU.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp index 383bad54be08905c5e9248ab3f7bf5c83bddc836..73bfac04a78ec9b972ec984466dbae582b2c03dc 100644 --- a/python_binding/operator/pybind_Matmul.cpp +++ b/python_binding/operator/pybind_Matmul.cpp @@ -11,8 +11,9 @@ #include <pybind11/pybind11.h> -#include "aidge/operator/MatMul.hpp" #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/MatMul.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp index 8a5e3db9decd01bd5fabe5897847f939e7fa02b3..91fa0489d8bedd16dd33424e33d7e15eea3e3ecb 100644 --- a/python_binding/operator/pybind_MaxPooling.cpp +++ b/python_binding/operator/pybind_MaxPooling.cpp @@ -17,10 +17,10 @@ #include <array> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/MaxPooling.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Types.h" -#include 
"aidge/data/Tensor.hpp" namespace py = pybind11; namespace Aidge { diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp index 5354f01ca508eb6ff04304d1f4072f431339973c..47c84c0e52f605a5466a63a5a5d0851fecedd2f8 100644 --- a/python_binding/operator/pybind_Mul.cpp +++ b/python_binding/operator/pybind_Mul.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Mul.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp index 05d6cd089754d1155e1506b4a491af7919bc4d31..589bad0be4ebfac10b476990e4501d6c219abbb1 100644 --- a/python_binding/operator/pybind_Operator.cpp +++ b/python_binding/operator/pybind_Operator.cpp @@ -11,10 +11,12 @@ ********************************************************************************/ #include <pybind11/pybind11.h> +#include <pybind11/stl.h> + #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Operator.hpp" #include "aidge/utils/Types.h" -#include <pybind11/stl.h> namespace py = pybind11; namespace Aidge { diff --git a/python_binding/operator/pybind_OperatorTensor.cpp b/python_binding/operator/pybind_OperatorTensor.cpp index d0a4d024384ca158c1c9b009f5267aedcb9b8470..4cd7306494730036f90dd6311bc80d821ebe8f4d 100644 --- a/python_binding/operator/pybind_OperatorTensor.cpp +++ b/python_binding/operator/pybind_OperatorTensor.cpp @@ -10,7 +10,9 @@ ********************************************************************************/ #include <pybind11/pybind11.h> + #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Operator.hpp" #include <pybind11/stl.h> diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp index 
d784a0d6ab7803bbc078b12b39df9ad8ef2f768e..1cd9f074fe5241be11da0ea7d0d1ed5a1c5869c2 100644 --- a/python_binding/operator/pybind_Pad.cpp +++ b/python_binding/operator/pybind_Pad.cpp @@ -9,14 +9,14 @@ * ********************************************************************************/ +#include <array> #include <pybind11/pybind11.h> #include <pybind11/stl.h> -#include <iostream> #include <string> #include <vector> -#include <array> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Pad.hpp" #include "aidge/operator/Operator.hpp" #include "aidge/utils/Types.h" diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp index 91726fc1d4721df1be712a26721d09b1a98fd9a2..baae552270a4776d292047140e213dbe1566d35e 100644 --- a/python_binding/operator/pybind_Pop.cpp +++ b/python_binding/operator/pybind_Pop.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Pop.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp index 03e822adbd326b6ad9693d58b53cd9f8f4bc3ac8..9e9ef772cadddb1c7928060b503c388b094ed9f4 100644 --- a/python_binding/operator/pybind_Pow.cpp +++ b/python_binding/operator/pybind_Pow.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Pow.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp index 025c8c5dd1651b3466a22e88f0966a7f51d2c109..eb74515915c252d50a2522cae6d6f4c6832ab3ef 100644 --- a/python_binding/operator/pybind_Producer.cpp +++ b/python_binding/operator/pybind_Producer.cpp @@ -12,11 +12,11 @@ #include <pybind11/pybind11.h> #include <pybind11/stl.h> -#include "aidge/utils/Types.h" // #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include 
"aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" -#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Types.h" namespace py = pybind11; namespace Aidge { diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp index f08c67cb98b629b8d1b61471c6f50a0de4c421d6..57601e25607a40c44c400fe75965d83050a146ed 100644 --- a/python_binding/operator/pybind_ReLU.cpp +++ b/python_binding/operator/pybind_ReLU.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/ReLU.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp index fbec6864042cf16a877faa67b351be5eb3f9b1eb..599a648a3f2733acd49bbbc293cd30734e8ea2ff 100644 --- a/python_binding/operator/pybind_ReduceMean.cpp +++ b/python_binding/operator/pybind_ReduceMean.cpp @@ -9,13 +9,14 @@ * ********************************************************************************/ +#include <array> #include <pybind11/pybind11.h> #include <pybind11/stl.h> #include <string> #include <vector> -#include <array> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/ReduceMean.hpp" #include "aidge/utils/Types.h" @@ -23,22 +24,22 @@ namespace py = pybind11; namespace Aidge { -template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) { - const std::string pyClassName("ReduceMeanOp" + std::to_string(DIM) + "D"); - py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, Attributes, OperatorTensor>( +void declare_ReduceMeanOp(py::module &m) { + const std::string pyClassName("ReduceMeanOp"); + py::class_<ReduceMean_Op, std::shared_ptr<ReduceMean_Op>, Attributes, OperatorTensor>( m, pyClassName.c_str(), py::multiple_inheritance()) - .def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName) - .def("get_outputs_name", 
&ReduceMean_Op<DIM>::getOutputsName) - .def("attributes_name", &ReduceMean_Op<DIM>::staticGetAttrsName) + .def("get_inputs_name", &ReduceMean_Op::getInputsName) + .def("get_outputs_name", &ReduceMean_Op::getOutputsName) + .def("attributes_name", &ReduceMean_Op::staticGetAttrsName) ; - declare_registrable<ReduceMean_Op<DIM>>(m, pyClassName); + declare_registrable<ReduceMean_Op>(m, pyClassName); - m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), [](const std::vector<int>& axes, + m.def("ReduceMean", [](const std::vector<int>& axes, DimSize_t keepDims, const std::string& name) { - AIDGE_ASSERT(axes.size() == DIM, "axes size [{}] does not match DIM [{}]", axes.size(), DIM); + // AIDGE_ASSERT(axes.size() == DIM, "axes size [{}] does not match DIM [{}]", axes.size(), DIM); - return ReduceMean<DIM>(to_array<DIM>(axes.begin()), keepDims, name); + return ReduceMean(axes, keepDims, name); }, py::arg("axes"), py::arg("keep_dims") = 1, py::arg("name") = ""); @@ -46,9 +47,9 @@ template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) { void init_ReduceMean(py::module &m) { - declare_ReduceMeanOp<1>(m); - declare_ReduceMeanOp<2>(m); - declare_ReduceMeanOp<3>(m); + declare_ReduceMeanOp(m); +// declare_ReduceMeanOp<2>(m); +// declare_ReduceMeanOp<3>(m); // FIXME: // m.def("ReduceMean1D", static_cast<NodeAPI(*)(const char*, int, int, int const diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp index dc6a9b4ec5de297df7c1c52877974ab84d55a0c2..0e336db28ddba4629e61d30e026befe4240c40b6 100644 --- a/python_binding/operator/pybind_Reshape.cpp +++ b/python_binding/operator/pybind_Reshape.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Reshape.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp index 
2393e56c10ef37e4eee078fe6f8bee4abd77ac39..8ffa8581593af9dc994baa566475317bcd96d475 100644 --- a/python_binding/operator/pybind_Sigmoid.cpp +++ b/python_binding/operator/pybind_Sigmoid.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Sigmoid.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp index 3bb1b082c19b98447726b0fb980cbd8688fd5ba3..558fc98c172ea1a264ee8ac3ebbc70e09eba826d 100644 --- a/python_binding/operator/pybind_Slice.cpp +++ b/python_binding/operator/pybind_Slice.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Slice.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp index bac553387a00856f2d4e01dea95e630a59666938..837f3ed2b92aeab5739d07a04b071040806d8a1f 100644 --- a/python_binding/operator/pybind_Softmax.cpp +++ b/python_binding/operator/pybind_Softmax.cpp @@ -12,6 +12,7 @@ #include <pybind11/pybind11.h> #include <string> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Softmax.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp index 33d46e02caee1046cbbdbaaa186c4898db5b10c1..7065b828eb18d77edce49726dd903045c7952977 100644 --- a/python_binding/operator/pybind_Sqrt.cpp +++ b/python_binding/operator/pybind_Sqrt.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Sqrt.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp index 1b858d1527eb3969e2acad9c0206311ff2981f17..e031040dfe8373c07d1524cbe4f75f3744e2f312 100644 --- a/python_binding/operator/pybind_Sub.cpp +++ 
b/python_binding/operator/pybind_Sub.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Sub.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Tanh.cpp b/python_binding/operator/pybind_Tanh.cpp index 2f3140039b030505af860352372c865c1aab05e3..a5c2f9dd5f2eab17e296f82788726210f976bd0d 100644 --- a/python_binding/operator/pybind_Tanh.cpp +++ b/python_binding/operator/pybind_Tanh.cpp @@ -11,6 +11,7 @@ #include <pybind11/pybind11.h> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Tanh.hpp" #include "aidge/operator/OperatorTensor.hpp" diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp index 59482cf481849738ed0656d8c55188b2ade51954..f6e2f2225e4858d3385c5d0140a863e7e7705652 100644 --- a/python_binding/operator/pybind_Transpose.cpp +++ b/python_binding/operator/pybind_Transpose.cpp @@ -17,10 +17,10 @@ #include <array> #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/operator/Transpose.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/OperatorTensor.hpp" +#include "aidge/operator/Transpose.hpp" #include "aidge/utils/Types.h" -#include "aidge/data/Tensor.hpp" namespace py = pybind11; namespace Aidge { diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp index 155855214cd00009e82e2ca59decf2e96adc0a20..63e5100ac65b5582c7236c2b3467a7d1debcaa36 100644 --- a/python_binding/pybind_core.cpp +++ b/python_binding/pybind_core.cpp @@ -69,6 +69,7 @@ void init_GraphRegex(py::module&); void init_MatchSolution(py::module&); void init_Recipes(py::module&); +void init_GraphViewHelper(py::module&); void init_Scheduler(py::module&); void init_TensorUtils(py::module&); @@ -131,6 +132,7 @@ void init_Aidge(py::module& m) { init_MatchSolution(m); init_Recipes(m); + init_GraphViewHelper(m); init_Scheduler(m); init_TensorUtils(m); init_Filler(m); diff --git 
a/python_binding/recipes/pybind_GraphViewHelper.cpp b/python_binding/recipes/pybind_GraphViewHelper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ac56fb4b43eb5b0a737157ec9e64c6771a692816 --- /dev/null +++ b/python_binding/recipes/pybind_GraphViewHelper.cpp @@ -0,0 +1,28 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <pybind11/pybind11.h> +#include <pybind11/stl.h> + +#include <memory> +#include <set> + +#include "aidge/graph/GraphView.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/recipes/GraphViewHelper.hpp" + +namespace py = pybind11; + +namespace Aidge { +void init_GraphViewHelper(py::module &m) { + m.def("producers", &producers, py::arg("graphview")); +} +} // namespace Aidge diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp index 170aa6c271a4f08ff5ad2801b754b647fee56df6..1b541b60672cc28cfe318b7bcc029627d6491818 100644 --- a/python_binding/scheduler/pybind_Scheduler.cpp +++ b/python_binding/scheduler/pybind_Scheduler.cpp @@ -21,6 +21,7 @@ void init_Scheduler(py::module& m){ py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>>(m, "SequentialScheduler") .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view")) .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false, py::arg("data")=std::vector<Tensor>()) + .def("backward", &SequentialScheduler::backward, py::arg("data"), py::arg("instanciate_grad")=true, py::arg("verbose")=false) .def("save_scheduling_diagram", &SequentialScheduler::saveSchedulingDiagram, 
py::arg("file_name")) .def("resetScheduling", &SequentialScheduler::resetScheduling) .def("generate_scheduling", &SequentialScheduler::generateScheduling, py::arg("verbose")=false) diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp index 1439391b2e22fe0bea3b5a7692941afc67bc1c6b..48d615a2b0a5ccb5a51a3edb28ac68dbd7d67501 100644 --- a/src/backend/OperatorImpl.cpp +++ b/src/backend/OperatorImpl.cpp @@ -10,14 +10,16 @@ ********************************************************************************/ #include <cassert> +#include <string> #include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/Operator.hpp" #include "aidge/data/Tensor.hpp" #include "aidge/utils/ErrorHandling.hpp" -Aidge::OperatorImpl::OperatorImpl(const Operator& op): +Aidge::OperatorImpl::OperatorImpl(const Operator& op, const std::string& backend): mOp(op), + mBackend(backend), mNbConsumedData(mOp.nbInputs(), 0), mNbProducedData(mOp.nbOutputs(), 0) { diff --git a/src/backend/cpu/data/TensorImpl.cpp b/src/backend/cpu/data/TensorImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..da90197e912fbeabc2f28bd3bedd91cc6f29e466 --- /dev/null +++ b/src/backend/cpu/data/TensorImpl.cpp @@ -0,0 +1,107 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include "aidge/backend/cpu/data/TensorImpl.hpp" + +#include <algorithm> // std::copy +#include <cstddef> // std::size_t +#include <cstdint> // std::uint8_t, std::int8_t, std::uint16_t, std::int16_t, + // std::uint32_t, std::int32_t, std::uint64_t, std::int64_t +#include <string> + +#include "aidge/data/half.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Types.h" + + +template <typename T> +bool Aidge::TensorImpl_cpu<T>::operator==(const Aidge::TensorImpl &other) const { + const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T>&>(other); + AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts); + + std::size_t i = 0; + for (; + i < mNbElts && + *static_cast<const T*>(rawPtr(i)) == *static_cast<const T*>(typedOtherImpl.rawPtr(i)); + ++i) + {} + return i == mNbElts; +} + +template <typename T> +void Aidge::TensorImpl_cpu<T>::zeros() { + if (mData.empty()) { + lazyInit(); + } + for (std::size_t i = 0; i < mData.size(); ++i) { + *(mData.data() + i) = T(0); + } +} + +template <typename T> +void Aidge::TensorImpl_cpu<T>::copyCast(const void *src, const Aidge::DataType srcDt, Aidge::NbElts_t length, Aidge::NbElts_t offset) { + if (length == 0) { + return; + } + + T* dstT = static_cast<T *>(rawPtr(offset)); + AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity"); + switch (srcDt) + { + case DataType::Float64: + std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length, + dstT); + break; + case DataType::Float32: + std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length, + dstT); + break; + case DataType::Float16: + std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length, + dstT); + break; + case DataType::Int64: + std::copy(static_cast<const int64_t*>(src), 
static_cast<const int64_t*>(src) + length, + dstT); + break; + case DataType::UInt64: + std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length, + dstT); + break; + case DataType::Int32: + std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length, + dstT); + break; + case DataType::UInt32: + std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length, + dstT); + break; + case DataType::Int16: + std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length, + dstT); + break; + case DataType::UInt16: + std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length, + dstT); + break; + case DataType::Int8: + std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length, + dstT); + break; + case DataType::UInt8: + std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length, + dstT); + break; + default: + AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type."); + break; + } +} \ No newline at end of file diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp index 0d5359156bb8ff23a5b4bdaea93d30b65f8ba702..b350c5bf0fa2b1af6f102c3a74486c159a7505b4 100644 --- a/src/data/Tensor.cpp +++ b/src/data/Tensor.cpp @@ -19,6 +19,9 @@ #include "aidge/utils/Types.h" Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) { + if (this == &other) { + return *this; + } resize(other.dims(), other.strides()); setDataType(other.dataType(), false); // do not convert existing data if (other.hasImpl()) { @@ -253,7 +256,7 @@ void Aidge::Tensor::copyCast(const Tensor& src) { AIDGE_ASSERT(src.isContiguous(), "cannot copy-cast non-contiguous tensor"); // Current Tensor has necessarily a data type, but may not have backend - if (!getImpl()) { + if (!hasImpl()) { // If no backend was set for the current tensor, use the same as src const auto deviceSrc = src.getImpl()->device(); 
setBackend(deviceSrc.first, deviceSrc.second); @@ -272,7 +275,7 @@ void Aidge::Tensor::copyFrom(const Tensor& src) { AIDGE_ASSERT(src.isContiguous(), "cannot copy from non-contiguous tensor"); // Current Tensor has necessarily a data type, but may not have backend - if (!getImpl()) { + if (!hasImpl()) { // If no backend was set for the current tensor, use the same as src const auto deviceSrc = src.getImpl()->device(); setBackend(deviceSrc.first, deviceSrc.second); diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp index 40d8926cb1d5f0f89ad19ff61252bf7b5e74f49c..586adbff50facec5d9fab4a447011b34e8090a2b 100644 --- a/src/graph/GraphView.cpp +++ b/src/graph/GraphView.cpp @@ -9,26 +9,36 @@ * ********************************************************************************/ -#include <algorithm> -#include <cassert> -#include <iterator> -#include <memory> -#include <utility> -#include <numeric> +#include "aidge/graph/GraphView.hpp" +#include <algorithm> // std::find, std::set_intersection, std::transform +#include <cassert> +#include <stdexcept> // std::runtime_error +#include <cstddef> // std::size_t +#include <cstdio> // std::fclose, std::fopen #include <fmt/format.h> -#include <fmt/ranges.h> +#include <iterator> // std::back_inserter, std::distance, std::inserter, + // std::next +#include <map> +#include <memory> // std::dynamic_pointer_cast, std::static_pointer_cast +#include <set> +#include <string> // std::to_string +#include <utility> // std::make_pair, std::pair +#include <vector> -#include "aidge/graph/Connector.hpp" -#include "aidge/utils/Types.h" -#include "aidge/graph/GraphView.hpp" #include "aidge/data/Tensor.hpp" -#include "aidge/operator/OperatorTensor.hpp" -#include "aidge/operator/Producer.hpp" #include "aidge/operator/GenericOperator.hpp" #include "aidge/operator/MetaOperator.hpp" -#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/operator/Producer.hpp" #include 
"aidge/utils/Directories.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Types.h" + + +const std::shared_ptr<Aidge::Node> Aidge::GraphView::operator[](const std::string& nodeName) const { + return (mNodeRegistry.find(nodeName) != mNodeRegistry.cend()) ? mNodeRegistry.at(nodeName) : nullptr; +} /////////////////////////////////////////////////////// // FUNCTIONAL DESCRIPTION @@ -59,9 +69,10 @@ Aidge::Connector Aidge::GraphView::operator()( // INNER /////////////////////////////////////////////////////// -std::string Aidge::GraphView::name() const { return mName; } +bool Aidge::GraphView::inView(const std::shared_ptr<Aidge::Node>& nodePtr) const { + return mNodes.find(nodePtr) != mNodes.cend(); +} -void Aidge::GraphView::setName(const std::string &name) { mName = name; } void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProducers) const { auto fp = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen((path + ".mmd").c_str(), "w"), &std::fclose); @@ -120,8 +131,8 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd continue; } IOIndex_t outputIdx = 0; - for (auto childs : node_ptr->getOrderedChildren()) { - for (auto child : childs) { + for (const auto& childs : node_ptr->getOrderedChildren()) { + for (const auto& child : childs) { if (child != nullptr) { IOIndex_t inputIdx = 0; for (auto parent : child->inputs()) { @@ -228,6 +239,33 @@ void Aidge::GraphView::setRootNode(NodePtr node) { // TENSOR MANAGEMENT /////////////////////////////////////////////////////// +std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::inputNodes() const { + std::set<std::shared_ptr<Aidge::Node>> nodes; + for (const auto& node : mInputNodes) { + nodes.insert(node.first); + } + return nodes; +} + +std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::outputNodes() const { + std::set<std::shared_ptr<Aidge::Node>> nodes; + for (const auto& node : mOutputNodes) { + nodes.insert(node.first); + } + return 
nodes; +} + +bool Aidge::GraphView::isInputNode(const std::shared_ptr<Aidge::Node>& nodePtr) const { + const auto nodes = inputNodes(); + return (nodes.find(nodePtr) != nodes.cend()); +} + +bool Aidge::GraphView::isOutputNode(const std::shared_ptr<Aidge::Node>& nodePtr) const { + const auto nodes = outputNodes(); + return (nodes.find(nodePtr) != nodes.cend()); +} + + void Aidge::GraphView::setOrderedInputs(const std::vector<std::pair<NodePtr, IOIndex_t>>& inputs) { size_t nbInputs = 0; std::vector<std::pair<NodePtr, IOIndex_t>> ignoredInputs(mInputNodes); @@ -420,14 +458,14 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_ while (!listNodes.empty()); } -void Aidge::GraphView::setBackend(const std::string &backend, DeviceIdx_t device) { - for (auto node : getNodes()) { +void Aidge::GraphView::setBackend(const std::string &backend, const DeviceIdx_t device) const { + for (const auto& node : getNodes()) { node->getOperator()->setBackend(backend, device); } } -void Aidge::GraphView::setDataType(const Aidge::DataType &datatype) { - for (auto node : getNodes()) { +void Aidge::GraphView::setDataType(const Aidge::DataType &datatype) const { + for (const auto& node : getNodes()) { node->getOperator()->setDataType(datatype); } } @@ -661,11 +699,9 @@ bool Aidge::GraphView::add(std::pair<NodePtr, std::set<NodePtr>> nodes, bool inc } bool Aidge::GraphView::add(std::shared_ptr<GraphView> graph) { - if (mRootNode == nullptr) { - mRootNode = graph->getRootNode(); - } - - return add(graph->getNodes(), false); + // set the rootNode to the other graphView rootNode if no rootNode yet + mRootNode = mRootNode ? 
mRootNode : graph->rootNode(); + return add(graph->getNodes(), false); } void Aidge::GraphView::addChild(std::shared_ptr<Node> toOtherNode, diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp index a54302d06059d43336800d81e4d18744b6243785..85bc4b7aef53e8064a8f31815a42689013880812 100644 --- a/src/operator/Add.cpp +++ b/src/operator/Add.cpp @@ -14,12 +14,24 @@ #include <string> #include <vector> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Add.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" const std::string Aidge::Add_Op::Type = "Add"; +Aidge::Add_Op::Add_Op(const Add_Op& op) + : OperatorTensor(op) +{ + if (op.mImpl) { + SET_IMPL_MACRO(Add_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } +} + void Aidge::Add_Op::computeOutputDims() { // check inputs have been associated bool associated = (nbInputs() > 0); // do not compute anything if no input @@ -59,3 +71,8 @@ void Aidge::Add_Op::computeOutputDims() { mOutputs[0]->resize(outDims); } } + +void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) { + SET_IMPL_MACRO(Add_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp index f09d8eb83c6a6dae6416ffebcc01b22fb479a862..3e594b49404999fee10eed3a22a7c0a78f765df0 100644 --- a/src/operator/Cast.cpp +++ b/src/operator/Cast.cpp @@ -9,9 +9,17 @@ * ********************************************************************************/ -#include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/Cast.hpp" +#include <memory> +#include <string> +#include <vector> + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + const std::string Aidge::Cast_Op::Type = "Cast"; void Aidge::Cast_Op::forward() { @@ -24,3 +32,8 @@ void Aidge::Cast_Op::forward() { runHooks(); } + +void 
Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Cast_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp index eafcd126480df6da2c0127bdbb896d3ce98d0e0a..7df5b6dbf6122da44aed280da0d717232ba42fef 100644 --- a/src/operator/Concat.cpp +++ b/src/operator/Concat.cpp @@ -9,8 +9,49 @@ * ********************************************************************************/ +#include "aidge/operator/Concat.hpp" + #include <string> +#include <vector> -#include "aidge/operator/Concat.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Concat_Op::Type = "Concat"; + +void Aidge::Concat_Op::computeOutputDims() { + // Every input is non-empty with the same number of dimensions + bool associated = (getInput(0) != nullptr); + associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input + auto outputDims = getInput(0)->dims(); + const auto firstInputNbDims = getInput(0) -> nbDims(); + for (IOIndex_t i = 1; i < nbInputs(); ++i) { + if (!getInput(i)) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i); + } + + if (getInput(i)->nbDims() == firstInputNbDims) { + for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) { + if (dim == getAttr<ConcatAttr::Axis>()) { + outputDims[dim] += getInput(i)->dims()[dim]; + } + else { + associated &= (getInput(i)->dims()[dim] == outputDims[dim]); + } + } + } + else { + associated = false; + break; + } + } + if (associated) { + getOutput(0)->resize(outputDims); + } +} -const std::string Aidge::Concat_Op::Type = "Concat"; \ No newline at end of file +void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) { + SET_IMPL_MACRO(Concat_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff 
--git a/src/operator/Div.cpp b/src/operator/Div.cpp index 6b55338f4ab7ac9131231fcced21869274c1bd47..5ffe5f08dbcbfe42c406846990c432a7fbd325e0 100644 --- a/src/operator/Div.cpp +++ b/src/operator/Div.cpp @@ -14,6 +14,7 @@ #include <string> #include <vector> +#include "aidge/data/Tensor.hpp" #include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/Div.hpp" #include "aidge/utils/Types.h" @@ -50,4 +51,10 @@ void Aidge::Div_Op::computeOutputDims() { } mOutputs[0]->resize(outDims); } -} \ No newline at end of file +} + + +void Aidge::Div_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Div_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/Erf.cpp b/src/operator/Erf.cpp index 387af4edf417f8c7ac6ee9b8b2b7069179ad59cb..81c87f10b10210c2af203a05df53e3330bb33b72 100644 --- a/src/operator/Erf.cpp +++ b/src/operator/Erf.cpp @@ -9,8 +9,17 @@ * ********************************************************************************/ +#include "aidge/operator/Erf.hpp" + #include <string> -#include "aidge/operator/Erf.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Erf_Op::Type = "Erf"; -const std::string Aidge::Erf_Op::Type = "Erf"; \ No newline at end of file +void Aidge::Erf_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Erf_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp index 32114f5bf9e0d160db9fdc2d1971481be0b4e703..9865d64f6a0b87be96244bc4b39c91b605f02b6f 100644 --- a/src/operator/FC.cpp +++ b/src/operator/FC.cpp @@ -9,8 +9,52 @@ * ********************************************************************************/ +#include "aidge/operator/FC.hpp" + +#include <memory> #include <string> +#include <vector> -#include "aidge/operator/FC.hpp" +#include "aidge/data/Data.hpp" +#include "aidge/data/Tensor.hpp" 
+#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::FC_Op::Type = "FC"; + +void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) { + AIDGE_ASSERT(inputIdx < 3, "Operators {} supports only {} inputs", type(), nbInputs()); + AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type"); + // TODO: FIXME: check this, because data dims may not be initialized at this point... + //if (inputIdx == 2) { + // assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template getAttr<FCAttr::NoBias>()) == false ? static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>()) : 0)); + // assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1); + //} + mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data); + if (inputIdx == 0 && getInput(0)->nbDims() == 1) + mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()}); +} + +void Aidge::FC_Op::computeOutputDims() { + bool associated = true; + for (IOIndex_t i = 0; i < nbInputs(); ++i) { + if (!getInput(i)) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i); + } + associated &= !(getInput(i)->empty()); + } + if (associated) { + // <batch, OutChannels> + mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()}); + } +} + +void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(FC_Op, *this, name); + mOutputs[0]->setBackend(name, device); -const std::string Aidge::FC_Op::Type = "FC"; \ No newline at end of file + // By default, automatically set backend for weight and bias inputs + getInput(1)->setBackend(name, device); + getInput(2)->setBackend(name, device); +} diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp index 
b5f9d738a0280b3bacdb2ce201c8303b2b4d0a1f..259e6513994970eb7e677f44c981888388825fae 100644 --- a/src/operator/Gather.cpp +++ b/src/operator/Gather.cpp @@ -9,15 +9,18 @@ * ********************************************************************************/ -#include <cstddef> -#include <cstdint> +#include "aidge/operator/Gather.hpp" + +#include <cstddef> // std::size_t +#include <cstdint> // std::int64_t #include <string> #include <vector> -#include "aidge/operator/Gather.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" + const std::string Aidge::Gather_Op::Type = "Gather"; void Aidge::Gather_Op::computeOutputDims() { @@ -44,4 +47,9 @@ void Aidge::Gather_Op::computeOutputDims() { mOutputs[0]->resize(outDims); } -} \ No newline at end of file +} + +void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Gather_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp index 5556f4ff5c87d1adc23f5bff1aaf90c230de06cc..3eae49b69ce639529d49dd1c0d241f12ece5d98b 100644 --- a/src/operator/GenericOperator.cpp +++ b/src/operator/GenericOperator.cpp @@ -9,13 +9,48 @@ * ********************************************************************************/ +#include "aidge/operator/GenericOperator.hpp" + +#include <cstddef> // std::size_t #include <vector> -#include "aidge/operator/GenericOperator.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Types.h" +#include "aidge/utils/ErrorHandling.hpp" const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Identity - = [](const std::vector<std::vector<size_t>>& inputsDims) { return inputsDims; }; + = [](const std::vector<std::vector<std::size_t>>& inputsDims) { return inputsDims; }; const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs) { - 
return [nbOutputs, inputIdx](const std::vector<std::vector<size_t>>& inputsDims) { return std::vector<std::vector<size_t>>(nbOutputs, inputsDims[inputIdx]); }; + return [nbOutputs, inputIdx](const std::vector<std::vector<std::size_t>>& inputsDims) { return std::vector<std::vector<std::size_t>>(nbOutputs, inputsDims[inputIdx]); }; } + +void Aidge::GenericOperator_Op::computeOutputDims() { + if (mComputeOutputDims) { + std::vector<std::vector<std::size_t>> inputsDims(nbInputs(), std::vector<std::size_t>()); + for (std::size_t i = 0; i < nbInputs(); ++i) { + if (getInput(i)) { + inputsDims[i] = getInput(i)->dims(); + } + } + + const auto& outputsDims = mComputeOutputDims(inputsDims); + AIDGE_ASSERT((outputsDims.size() == nbOutputs()), "The provided ComputeDimsFunc function returns the wrong number of outputs"); + for (std::size_t i = 0; i < nbOutputs(); ++i) { + mOutputs[i]->resize(outputsDims[i]); + } + } + else { + AIDGE_ASSERT(false, "Cannot compute output dim of a GenericOperator"); + } +} + +bool Aidge::GenericOperator_Op::outputDimsForwarded() const { + if (mComputeOutputDims) { + return !(mOutputs[0]->empty()); + } + else { + AIDGE_ASSERT(false, "GenericOperator cannot forward dims"); + return false; + } +} \ No newline at end of file diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp index f48c7ca81d6abd1d5150f54eb7d98bf109307d33..56899875338d487294163aa018e0d98b5f7a5269 100644 --- a/src/operator/MatMul.cpp +++ b/src/operator/MatMul.cpp @@ -13,6 +13,7 @@ #include <string> #include <vector> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/MatMul.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" @@ -70,3 +71,8 @@ void Aidge::MatMul_Op::computeOutputDims() { mOutputs[0]->resize(outDims); } } + +void Aidge::MatMul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(MatMul_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/Memorize.cpp 
b/src/operator/Memorize.cpp index 6e34c1a2005f551c255e9b7441e853015354337f..6e54a234d2fc78c8e8e9a43a7528709c8e51adc4 100644 --- a/src/operator/Memorize.cpp +++ b/src/operator/Memorize.cpp @@ -9,9 +9,17 @@ * ********************************************************************************/ -#include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/Memorize.hpp" +#include <memory> +#include <string> +#include <vector> + +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Types.h" + const std::string Aidge::Memorize_Op::Type = "Memorize"; void Aidge::Memorize_Op::computeOutputDims() { @@ -33,6 +41,11 @@ void Aidge::Memorize_Op::computeOutputDims() { } } +void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Memorize_Op>::create({name})(*this); + mOutputs[0]->setBackend(name, device); +} + bool Aidge::Memorize_Op::outputDimsForwarded() const { // Only check the output dims bool forwarded = true; diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp index 883185021b395b42e5c47ef0461ebc0614f14456..45e7556265d1af4e95e50be4cf60e8067ded332f 100644 --- a/src/operator/MetaOperator.cpp +++ b/src/operator/MetaOperator.cpp @@ -10,9 +10,16 @@ ********************************************************************************/ #include "aidge/operator/MetaOperator.hpp" + +#include <cstddef> // std::size_t +#include <memory> +#include <string> + +#include "aidge/data/Tensor.hpp" +#include "aidge/graph/GraphView.hpp" #include "aidge/utils/ErrorHandling.hpp" -Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph) +Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph) : OperatorTensor(type, graph->dataInputs().size(), (graph->getOrderedInputs().size() - graph->dataInputs().size()), graph->getOrderedOutputs().size()), 
mGraph(graph) { diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp index d4a594e95b2695b496fc28b8e8a7fcf3442e9253..89bef9e0edcf6731dfbaf9ebf48ebddf5b71e815 100644 --- a/src/operator/Mul.cpp +++ b/src/operator/Mul.cpp @@ -10,14 +10,16 @@ ********************************************************************************/ #include <cstddef> // std::size_t +#include <memory> #include <stdexcept> // std::runtime_error #include <string> #include <vector> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Mul.hpp" -#include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Types.h" const std::string Aidge::Mul_Op::Type = "Mul"; @@ -53,4 +55,9 @@ void Aidge::Mul_Op::computeOutputDims() { else if (!getInput(0)->empty() && !getInput(1)->empty()) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible input dimensions for Operator Mul: {} and {}", getInput(0)->dims(), getInput(1)->dims()); } -} \ No newline at end of file +} + +void Aidge::Mul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Mul_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp index c0ada265410f9bc46aab3b43fae270f1e74dd5eb..b85c18040ad84a1e9b1ea1f8b475c32260b6587a 100644 --- a/src/operator/OperatorTensor.cpp +++ b/src/operator/OperatorTensor.cpp @@ -19,6 +19,32 @@ #include "aidge/utils/ErrorHandling.hpp" +Aidge::OperatorTensor::OperatorTensor(const std::string& type, + const IOIndex_t nbData, + const IOIndex_t nbParam, + const IOIndex_t nbOut) +: Operator(type, nbData, nbParam, nbOut, OperatorType::Tensor), + mInputs(std::vector<std::shared_ptr<Tensor>>(nbData + nbParam, nullptr)), + mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) { + for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) { + mOutputs[i] = std::make_shared<Tensor>(); + 
mOutputs[i]->setDataType(DataType::Float32); + } +} + + +Aidge::OperatorTensor::OperatorTensor(const OperatorTensor& other) + : Operator(other), + mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)), + mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) { + for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) { + mOutputs[i] = std::make_shared<Tensor>(); + // mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i))); + // datatype already copied + } +} + + void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) { AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs()); AIDGE_ASSERT(data->type() == Tensor::Type, "Input data must be of Tensor type"); @@ -45,6 +71,9 @@ void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, std::share } } +std::shared_ptr<Aidge::Data> Aidge::OperatorTensor::getRawInput(const Aidge::IOIndex_t inputIdx) const { + return std::static_pointer_cast<Data>(getInput(inputIdx)); +} const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getInput(const Aidge::IOIndex_t inputIdx) const { AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs()); return mInputs[inputIdx]; @@ -53,13 +82,23 @@ const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getInput(const Aidg void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) { AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type()); AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs()); - *mOutputs[outputIdx] = *std::dynamic_pointer_cast<Tensor>(data); + const auto& data_tensor = std::dynamic_pointer_cast<Tensor>(data); + // if (mImpl) + // AIDGE_ASSERT(data_tensor->getImpl()->backend() == backend(), "Data parameter and Operator have different backends: {} and {}", 
data_tensor->getImpl()->backend(), backend()); + *mOutputs[outputIdx] = *data_tensor; } void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) { AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type()); AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs()); - *mOutputs[outputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data)); + auto&& data_tensor = std::dynamic_pointer_cast<Tensor>(data); + // if (mImpl) + // AIDGE_ASSERT(data_tensor->getImpl()->backend() == backend(), "Data parameter and Operator have different backends: {} and {}", data_tensor->getImpl()->backend(), backend()); + *mOutputs[outputIdx] = std::move(*data_tensor); +} + +std::shared_ptr<Aidge::Data> Aidge::OperatorTensor::getRawOutput(const Aidge::IOIndex_t outputIdx) const { + return std::static_pointer_cast<Data>(getOutput(outputIdx)); } const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aidge::IOIndex_t outputIdx) const { diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp index 3dd65eb4d34266f6e419bdc86362b8da4a55fdf0..06999e301ce0968b2d9979e47f412c02e59de3ad 100644 --- a/src/operator/Pop.cpp +++ b/src/operator/Pop.cpp @@ -9,9 +9,17 @@ * ********************************************************************************/ +#include "aidge/operator/Pop.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Pop.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" + const std::string Aidge::Pop_Op::Type = "Pop"; @@ -36,3 +44,8 @@ void Aidge::Pop_Op::forward() { Operator::forward(); ++this->template getAttr<PopAttr::ForwardStep>(); } + +void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Pop_Op, *this, name); + 
mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp index 5e29eae0c0f42e7d566a933e9409766026369dad..72a04de04fda8a432309de8b4a69b1dfb6af1370 100644 --- a/src/operator/Pow.cpp +++ b/src/operator/Pow.cpp @@ -15,6 +15,7 @@ #include <vector> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Pow.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" @@ -50,4 +51,9 @@ void Aidge::Pow_Op::computeOutputDims() { } mOutputs[0]->resize(outDims); } +} + +void Aidge::Pow_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Pow_Op, *this, name); + mOutputs[0]->setBackend(name, device); } \ No newline at end of file diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp index 7bccbe763b90f2697997a889b30b610e4b531334..43e991288c483f07138a2b236a2c4925ea0a3754 100644 --- a/src/operator/Producer.cpp +++ b/src/operator/Producer.cpp @@ -9,8 +9,114 @@ * ********************************************************************************/ +#include "aidge/operator/Producer.hpp" + +#include <cstddef> +#include <array> +#include <memory> #include <string> -#include "aidge/operator/Producer.hpp" +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" + const std::string Aidge::Producer_Op::Type = "Producer"; + + +Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, bool constant) + : OperatorTensor(Type, 0, 0, 1), + Attributes_(attr<ProdAttr::Constant>(constant)) +{ + mOutputs[0] = tensor; // copy the pointer of the Tensor +#ifdef PYBIND + if(Py_IsInitialized()) { + auto obj = py::cast(&(*this)); + setImpl((mOutputs[0]->hasImpl()) ? + (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ? 
+ Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : + std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) : + nullptr); + } else { + setImpl((mOutputs[0]->hasImpl()) ? + (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ? + Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : + std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) : + nullptr); + } +#else + setImpl((mOutputs[0]->hasImpl()) ? + (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ? + Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : + std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) : + nullptr); +#endif +} + +/** + * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), + * but not its input tensors (the new operator has no input associated). + * @param op OperatorTensor to copy. + */ +Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op) + : OperatorTensor(op), + Attributes_(op) +{ + mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0))); +#ifdef PYBIND + if(Py_IsInitialized()) { + auto obj = py::cast(&(*this)); + setImpl((mOutputs[0]->hasImpl()) ? + (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ? + Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : + std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) : + nullptr); + } else { + setImpl((mOutputs[0]->hasImpl()) ? + (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ? + Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : + std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) : + nullptr); + } +#else + setImpl((mOutputs[0]->hasImpl()) ? + (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}) ? 
+ Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : + std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend())) : + nullptr); +#endif + // if (mOutputs[0]->hasImpl()) { + // if (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){ + // setImpl(Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this)); + // } + // else { + // mImpl = std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend()); + // } + + // } else { + // mImpl = nullptr; + // } +} + +void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { +#ifdef PYBIND + if(Py_IsInitialized()) { + auto obj = py::cast(&(*this)); + setImpl((Registrar<Producer_Op>::exists({name})) ? + Registrar<Producer_Op>::create(name)(*this) : + std::make_shared<OperatorImpl>(*this, name)); + } else { + setImpl((Registrar<Producer_Op>::exists({name})) ? + Registrar<Producer_Op>::create(name)(*this) : + std::make_shared<OperatorImpl>(*this, name)); + } +#else + setImpl((Registrar<Producer_Op>::exists({name})) ? 
+ Registrar<Producer_Op>::create(name)(*this) : + std::make_shared<OperatorImpl>(*this, name)); +#endif + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/ReLU.cpp b/src/operator/ReLU.cpp index 0f7874acfe7d865ea8c56d4bca02b51864480df6..7b945a7d62ab0ef7f73a25f6f74430e725d17b48 100644 --- a/src/operator/ReLU.cpp +++ b/src/operator/ReLU.cpp @@ -9,8 +9,17 @@ * ********************************************************************************/ +#include "aidge/operator/ReLU.hpp" + +#include <memory> #include <string> -#include "aidge/operator/ReLU.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::ReLU_Op::Type = "ReLU"; -const std::string Aidge::ReLU_Op::Type = "ReLU"; \ No newline at end of file +void Aidge::ReLU_Op::setBackend(const std::string& name, DeviceIdx_t device) { + SET_IMPL_MACRO(ReLU_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0de676e22ec668a9b41d7d61f184465d431715a2 --- /dev/null +++ b/src/operator/ReduceMean.cpp @@ -0,0 +1,61 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include "aidge/operator/ReduceMean.hpp" + +#include <algorithm> // std::for_each, std::sort +#include <cstddef> // std::size_t +#include <cstdint> // std::int32_t +#include <memory> +#include <stdexcept> // std::runtime_error +#include <string> +#include <vector> + +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::ReduceMean_Op::Type = "ReduceMean"; + +void Aidge::ReduceMean_Op::computeOutputDims() { + if (!getInput(0)) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor"); + } + if (!getInput(0)->empty()) { + // make Axes attribute positive + std::vector<std::int32_t>& axes = this->template getAttr<ReduceMeanAttr::Axes>(); + std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) { + if (val < 0) + val+=static_cast<std::int32_t>(getInput(0)->nbDims()); + }); + std::sort(axes.begin(), axes.end()); + + // build output dimensions + std::vector<DimSize_t> outDims = getInput(0)->dims(); + if (this->template getAttr<ReduceMeanAttr::KeepDims>()) { + std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; }); + } + else { + for (auto it = axes.crbegin(); it != axes.crend(); ++it) + outDims.erase(outDims.begin() + static_cast<std::size_t>(*it)); + } + + // TODO: change {1} for {} when scalar Tensors are better handled. + mOutputs[0]->resize((outDims.size()>0) ? 
outDims : std::vector<DimSize_t>({1})); + + } + } + +void Aidge::ReduceMean_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(ReduceMean_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp index 30b060cd2a58d7995a7447bd9b85b9bc0026a7f7..79cfc0659849248bac791ba5b1db25096824e928 100644 --- a/src/operator/Reshape.cpp +++ b/src/operator/Reshape.cpp @@ -9,14 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Reshape.hpp" + #include <cstddef> // std::size_t #include <cstdint> // std::int64_t +#include <memory> #include <stdexcept> // std::runtime_error #include <string> #include <vector> -#include "aidge/operator/Reshape.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" const std::string Aidge::Reshape_Op::Type = "Reshape"; @@ -55,4 +59,9 @@ void Aidge::Reshape_Op::computeOutputDims() { mOutputs[0]->resize(outDims); } +} + +void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Reshape_Op, *this, name); + mOutputs[0]->setBackend(name, device); } \ No newline at end of file diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp index 4c121e1268c1e1a62f793f38c6d816e7c6b48c25..8b0d6f9db698e36d232dec38fd8cdd0fad5f8c59 100644 --- a/src/operator/Scaling.cpp +++ b/src/operator/Scaling.cpp @@ -9,8 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Scaling.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Scaling.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Scaling_Op::Type = "Scaling"; -const std::string Aidge::Scaling_Op::Type = "Scaling"; \ 
No newline at end of file +void Aidge::Scaling_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Scaling_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Sigmoid.cpp b/src/operator/Sigmoid.cpp index 48ed5f8286712c94bcf87f3234e70080652ab141..a6edcf823695f95253d6c56e45975480909679d3 100644 --- a/src/operator/Sigmoid.cpp +++ b/src/operator/Sigmoid.cpp @@ -9,8 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Sigmoid.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Sigmoid.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Sigmoid_Op::Type = "Sigmoid"; -const std::string Aidge::Sigmoid_Op::Type = "Sigmoid"; \ No newline at end of file +void Aidge::Sigmoid_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Sigmoid_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp index e88ff4bb4ec6e2cb1357d578c2d07cc4edcb59f7..612c61b0f66b97eb4630214538a22154a67b80d8 100644 --- a/src/operator/Softmax.cpp +++ b/src/operator/Softmax.cpp @@ -9,8 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Softmax.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Softmax.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Softmax_Op::Type = "Softmax"; -const std::string Aidge::Softmax_Op::Type = "Softmax"; \ No newline at end of file +void Aidge::Softmax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Softmax_Op>::create(name)(*this); + 
mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Sqrt.cpp b/src/operator/Sqrt.cpp index dbcaba42619762f8fd00bb2f6e0aa0de11d92960..d8ac8b8b0bf28110bd52493d7833f64e9e80fc6a 100644 --- a/src/operator/Sqrt.cpp +++ b/src/operator/Sqrt.cpp @@ -9,8 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Sqrt.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Sqrt.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Sqrt_Op::Type = "Sqrt"; -const std::string Aidge::Sqrt_Op::Type = "Sqrt"; \ No newline at end of file +void Aidge::Sqrt_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Sqrt_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp index 9d933bf6c97348842fae8f405d3e709e68d56916..0c12e6a1fdb7f3b1056e19bf694996d0061b5b04 100644 --- a/src/operator/Sub.cpp +++ b/src/operator/Sub.cpp @@ -9,15 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Sub.hpp" + #include <cstddef> // std::size_t #include <stdexcept> // std::runtime_error #include <string> #include <vector> #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/operator/Sub.hpp" -#include "aidge/utils/Types.h" +#include "aidge/data/Tensor.hpp" #include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" const std::string Aidge::Sub_Op::Type = "Sub"; @@ -50,4 +53,9 @@ void Aidge::Sub_Op::computeOutputDims() { } mOutputs[0]->resize(outDims); } -} \ No newline at end of file +} + +void Aidge::Sub_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Sub_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} 
diff --git a/src/operator/Tanh.cpp b/src/operator/Tanh.cpp index de55a6d6c69df5706b945ef9f56027f7a09ce8d7..c113ee6f2da52f40a66a8df04ca33ec4b85f3387 100644 --- a/src/operator/Tanh.cpp +++ b/src/operator/Tanh.cpp @@ -9,8 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Tanh.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Tanh.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Tanh_Op::Type = "Tanh"; -const std::string Aidge::Tanh_Op::Type = "Tanh"; \ No newline at end of file +void Aidge::Tanh_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Tanh_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/recipes/GraphViewHelper.cpp b/src/recipes/GraphViewHelper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3b42db7fe18d2269b95cf35fd92851d1e3684bad --- /dev/null +++ b/src/recipes/GraphViewHelper.cpp @@ -0,0 +1,57 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <memory> +#include <set> + +#include "aidge/data/Tensor.hpp" +#include "aidge/graph/Node.hpp" +#include "aidge/graph/GraphView.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/recipes/GraphViewHelper.hpp" + + +std::set<std::shared_ptr<Aidge::Tensor>> Aidge::producers(std::shared_ptr<Aidge::GraphView> graphview) { + std::set<std::shared_ptr<Tensor>> res; + const auto& nodes = graphview->getNodes(); + for (const auto& node : nodes) { + if (node->type() == "Producer") { + const auto& param = std::static_pointer_cast<OperatorTensor>(node->getOperator()); + res.insert(param->getOutput(0)); + } + } + return res; +} + + +std::set<std::shared_ptr<Aidge::Tensor>> Aidge::parameters(std::shared_ptr<Aidge::GraphView> graphview) { + std::set<std::shared_ptr<Tensor>> res; + const auto& nodes = graphview->getNodes(); + for (const auto& node : nodes) { + const auto& param = std::static_pointer_cast<OperatorTensor>(node->getOperator()); + for (std::size_t o = 0; o < param->nbOutputs(); ++o) { + res.insert(param->getOutput(o)); + } + } + return res; +} + +void Aidge::compile_gradient(std::shared_ptr<Aidge::GraphView> gv) { + for (const auto& node : gv->getNodes()) { + // TODO: check that each node is an OperatorTensor + AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Cannot instanciate gradient of an Operator ({}) that doesn't use Tensor.", node->getOperator()->type()); + const std::shared_ptr<OperatorTensor> op = std::dynamic_pointer_cast<OperatorTensor>(node -> getOperator()); + for (std::size_t o = 0; o < node -> nbOutputs(); ++o) { + op->getOutput(o)->initGradient(); + } + } +} \ No newline at end of file diff --git a/src/recipes/RemoveDropout.cpp b/src/recipes/RemoveDropout.cpp index 
d141f5d3a74e42f8f0fc5465fda043f91f37d5bc..4f8805845bd1f46fd187cba3564b031c55c4655a 100644 --- a/src/recipes/RemoveDropout.cpp +++ b/src/recipes/RemoveDropout.cpp @@ -10,7 +10,6 @@ ********************************************************************************/ #include <memory> -#include <iostream> #include "aidge/graph/Node.hpp" #include "aidge/graph/GraphView.hpp" diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp index 8febe3cd267a4b5e9ebd1f5cc03805279fa5c382..94baf6a3e7b6e2e86de4e2d72ed19bfd9338392e 100644 --- a/src/scheduler/Scheduler.cpp +++ b/src/scheduler/Scheduler.cpp @@ -21,8 +21,9 @@ #include "aidge/graph/GraphView.hpp" #include "aidge/graph/Node.hpp" -#include "aidge/utils/Types.h" #include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Types.h" +#include "aidge/recipes/GraphViewHelper.hpp" #include "aidge/operator/Producer.hpp" #include "aidge/operator/Memorize.hpp" #include "aidge/operator/MetaOperator.hpp" @@ -71,14 +72,14 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { do { // 2) From the current consumers list, check if any prior consumer node - // is needed. A prior will generally be required for any node consuming + // is needed. A prior will generally be required for any node consuming // parameters (weights and bias) that is not an input node. // If for a given node, only parent producers (at any depth) are needed // to satisfy its required data, it becomes a prior. // If the prior node is a producer, it is added to the list of required // producers. // If the prior node is of another type, it replaces the initial consumer - // in the new priorConsumers list. The initial consumer will become + // in the new priorConsumers list. The initial consumer will become // again a consumer later, by construction. 
if (verbose) fmt::print("List of consumers with their priors:\n"); std::set<std::shared_ptr<Node>> requiredProducers; @@ -130,7 +131,7 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { } // 5) Find runnable consumers. - // A consumer is runnable if the required data is available for all of + // A consumer is runnable if the required data is available for all of // its inputs. At this point, not all consumers are necessarily // runnable because some may depend on the execution of others (when // there is multiple successive priors for example). @@ -154,7 +155,7 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { fmt::print("{}", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1)); fmt::print("\n"); } - + bool isRunnable = true; for (IOIndex_t inputIdx = 0; inputIdx < consumer->nbInputs(); ++inputIdx) { if (/*consumer->getOperator()->getNbRequiredData(inputIdx) > 0 @@ -190,7 +191,7 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) { // 6) Push runnable consumers in the list of nodes to run and update the // consumer producer system. - // At this point, simultaneously runnable consumers have no data + // At this point, simultaneously runnable consumers have no data // dependency and could be run in parallel! 
for (const auto& runnable : runnableConsumers) { if (verbose) fmt::print("Runnable: {}\n", namePtrTable[runnable]); @@ -324,7 +325,7 @@ Aidge::MemoryManager Aidge::SequentialScheduler::generateMemory(bool incProducer memManager.releaseDependencies(node); continue; } - + const auto childs = node->getChildren(); AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type."); const auto op = std::static_pointer_cast<OperatorTensor>(node->getOperator()); @@ -348,7 +349,7 @@ Aidge::MemoryManager Aidge::SequentialScheduler::generateMemory(bool incProducer length = op->getOutput(outputIdx)->dims().end()[-1]; count = op->getOutput(outputIdx)->dims().end()[-2]; } - + // Check if wrap around buffer is possible for this node // (re-using previous node outputs memory for this node outputs). // => only if this node is the only child of its parent(s) @@ -356,7 +357,7 @@ Aidge::MemoryManager Aidge::SequentialScheduler::generateMemory(bool incProducer size_t wrapAroundExtra = 0; wrapAroundMemPlane.push_back(nullptr); - // Select the best parent among all allocable nodes for + // Select the best parent among all allocable nodes for // reallocation, which is the one with most memory (in order // to minimize the reallocation size). 
IOIndex_t inputIdx = 0; @@ -427,7 +428,7 @@ void Aidge::SequentialScheduler::connectInputs(std::vector<std::shared_ptr<Aidge // Assert that the number of input data producers corresponds to the number of data input assert(data.size() == inputNodes.size() && "Scheduler connectInput error - Inconsistent number of graph inputs and inputs passed to the graph"); - + for (std::size_t i = 0; i < data.size(); ++i){ // TODO : maybe shallow copy instead of deepcopy inputNodes[i].first->getOperator()->setInput(inputNodes[i].second, data[i]); @@ -436,7 +437,7 @@ void Aidge::SequentialScheduler::connectInputs(std::vector<std::shared_ptr<Aidge void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::vector<std::shared_ptr<Aidge::Tensor>> data) { - + // Collect all data input of the graph (that are producers) if (!data.empty()){ connectInputs(data); @@ -476,6 +477,59 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::ve } } +void Aidge::SequentialScheduler::backward(std::vector<std::shared_ptr<Aidge::Tensor>> data, bool instanciateGrad, bool verbose) { + // create ad set Grad values + if (instanciateGrad) { compile_gradient(mGraphView); } + + const auto& ordered_outputs = mGraphView->getOrderedOutputs(); + AIDGE_ASSERT(ordered_outputs.size() == data.size(), "You must provide the \ + right number of data objects to run the backward function. 
\ + {} outputs detected for the current GraphView when {} were \ + provided.", ordered_outputs.size(), data.size()); + for (std::size_t i = 0; i < ordered_outputs.size(); ++i) { + const std::shared_ptr<OperatorTensor> op_ = std::dynamic_pointer_cast<OperatorTensor>(ordered_outputs[i].first->getOperator()); + const std::shared_ptr<Tensor> t_grad = op_->getOutput(ordered_outputs[i].second)->grad(); + AIDGE_ASSERT(data[i]->dims() == t_grad->dims(), "Wrong gradient size."); + *t_grad = data[i]->clone(); + } + // Generate scheduling *only if empty* + // If scheduling was already generated (in one or several steps, i.e. one or + // several successive call to generateScheduling()), do not generate it twice + if (mStaticSchedule.empty()) { + this->generateScheduling(); + } + + // map of node <-> info to display with verbose + const auto namePtrTable = mGraphView->getRankedNodesName("{0} ({1}#{3})"); + + // Clear previous scheduling results + mScheduling.clear(); + + std::size_t cpt = 0; + // run scheduled operators in reverse order + const auto& runnableList = mStaticSchedule.at(mStaticScheduleStep); + for (auto runnable = runnableList.crbegin(); runnable != runnableList.crend(); ++runnable) { + if (verbose) + fmt::print("run: {}\n", namePtrTable.at(*runnable)); + else + drawProgressBar(static_cast<float>(cpt) / static_cast<float>(mStaticSchedule.size()), 50, + (std::string("running ") + namePtrTable.at(*runnable))); + const auto tStart = std::chrono::high_resolution_clock::now(); + (*runnable)->backward(); + const auto tEnd = std::chrono::high_resolution_clock::now(); + mScheduling.push_back(SchedulingElement(*runnable, tStart, tEnd)); + cpt++; + } + if (!verbose) drawProgressBar(1.0, 50, " "); + fmt::print("\n"); + + ++mStaticScheduleStep; + if (mStaticScheduleStep == mStaticSchedule.size()) { + mStaticScheduleStep = 0; + } +} + + void Aidge::SequentialScheduler::saveSchedulingDiagram(const std::string& fileName) const { auto fp = std::unique_ptr<FILE, 
decltype(&std::fclose)>(std::fopen((fileName + ".mmd").c_str(), "w"), &std::fclose); @@ -541,7 +595,7 @@ Aidge::NbElts_t Aidge::SequentialScheduler::getNbAvailableData(const std::shared const auto upperInput = upperNode->inputs()[nodeInputIdx]; if (upperInput.first) { return upperInput.first->getOperator()->getNbProducedData(upperInput.second); - } + } } ++nodeInputIdx; } diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp index 3ff2a3c6c7422c1ead53a629670975a25e54f7d7..cd42791e0db1d95469bdd414cab94f1c6e8fea17 100644 --- a/unit_tests/operator/Test_MetaOperator.cpp +++ b/unit_tests/operator/Test_MetaOperator.cpp @@ -21,7 +21,7 @@ using namespace Aidge; -TEST_CASE("[core/operators] MetaOperator", "[Operator]") { +TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") { SECTION("PaddedConv") { auto op = PaddedConv(1, 3, {3, 3}, "padded_conv", {1, 1}, {1, 1, 1, 1}); @@ -108,21 +108,21 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator]") { // Weights X myLSTM->input(1).first->getOperator()->setOutput(0, myInitW); - myLSTM->input(2).first->getOperator()->setOutput(0, myInitW); - myLSTM->input(3).first->getOperator()->setOutput(0, myInitW); - myLSTM->input(4).first->getOperator()->setOutput(0, myInitW); + op->setInput(2, myInitW); + op->setInput(3, myInitW); + op->setInput(4, myInitW); // Weights H - myLSTM->input(5).first->getOperator()->setOutput(0, myInitR); - myLSTM->input(6).first->getOperator()->setOutput(0, myInitR); - myLSTM->input(7).first->getOperator()->setOutput(0, myInitR); - myLSTM->input(8).first->getOperator()->setOutput(0, myInitR); + op->setInput(5, myInitR); + op->setInput(6, myInitR); + op->setInput(7, myInitR); + op->setInput(8, myInitR); auto g = getConnectedGraphView(myLSTM); g->save("lstm_before_expand", true, true); expandMetaOps(g); g->setRootNode(pop); - REQUIRE(g->getRootNode() == pop); + REQUIRE(g->rootNode() == pop); g->save("lstm_expanded", true, true); 
REQUIRE(g->getNodes().size() == 41); diff --git a/unit_tests/recipes/Test_removeFlatten.cpp b/unit_tests/recipes/Test_removeFlatten.cpp index dd747cdea0020acc745237e30db88fc873109243..84099ac0b77a633893af6a7550464e539c95d806 100644 --- a/unit_tests/recipes/Test_removeFlatten.cpp +++ b/unit_tests/recipes/Test_removeFlatten.cpp @@ -45,7 +45,7 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") { CHECK(fc0->getParent(0) == nullptr); CHECK(fc0->getChildren(0).size() == 1); - CHECK(g->getRootNode() == fc0); + CHECK(g->rootNode() == fc0); } SECTION("flatten first layer : flatten removed") { auto g = Sequential({flatten, fc0}); @@ -60,7 +60,7 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") { CHECK(fc0->getParent(0) == nullptr); CHECK(fc0->getChildren(0).size() == 0); - CHECK(g->getRootNode() == fc0); + CHECK(g->rootNode() == fc0); } SECTION("flatten middle layer") { @@ -76,7 +76,7 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") { CHECK(fc1->getParent(0) == fc0); CHECK(fc0->getChildren(0)[0] == fc1); - CHECK(g->getRootNode() == fc0); + CHECK(g->rootNode() == fc0); } SECTION("flatten right after a producer") { auto g = Sequential({prod, flatten, fc0}); @@ -94,7 +94,7 @@ TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") { CHECK(fc0->getParent(0) == prod); CHECK(fc0->getChildren(0).size() == 0); - CHECK(g->getRootNode() == prod); + CHECK(g->rootNode() == prod); } }