From 6cd7a3c80b3d7563edf4b96d793bd94ab45b51f2 Mon Sep 17 00:00:00 2001 From: NAUD Maxence <maxence.naud@cea.fr> Date: Thu, 21 Mar 2024 13:10:43 +0000 Subject: [PATCH] [Add] 'Operator::backend()' member function and move Tensor dependence from header to source file when possible in operator --- include/aidge/operator/Add.hpp | 21 ++------ include/aidge/operator/AvgPooling.hpp | 21 ++++---- include/aidge/operator/BatchNorm.hpp | 2 +- include/aidge/operator/Cast.hpp | 13 +++-- include/aidge/operator/Concat.hpp | 56 +++----------------- include/aidge/operator/Conv.hpp | 24 +++++---- include/aidge/operator/ConvDepthWise.hpp | 9 ++-- include/aidge/operator/Div.hpp | 15 ++---- include/aidge/operator/Erf.hpp | 17 +++---- include/aidge/operator/FC.hpp | 44 ++-------------- include/aidge/operator/Gather.hpp | 15 ++---- include/aidge/operator/GenericOperator.hpp | 56 +++++--------------- include/aidge/operator/Identity.hpp | 19 ++++--- include/aidge/operator/LeakyReLU.hpp | 4 +- include/aidge/operator/MatMul.hpp | 14 ++--- include/aidge/operator/MaxPooling.hpp | 18 ++++--- include/aidge/operator/Memorize.hpp | 2 +- include/aidge/operator/MetaOperator.hpp | 12 ++++- include/aidge/operator/Mul.hpp | 12 ++--- include/aidge/operator/Operator.hpp | 7 ++- include/aidge/operator/OperatorTensor.hpp | 31 ++---------- include/aidge/operator/Pop.hpp | 24 ++++----- include/aidge/operator/Pow.hpp | 13 ++--- include/aidge/operator/Producer.hpp | 59 ++++++++++------------ include/aidge/operator/ReLU.hpp | 13 ++--- include/aidge/operator/ReduceMean.hpp | 15 +++--- include/aidge/operator/Reshape.hpp | 10 ++-- include/aidge/operator/Scaling.hpp | 20 +++----- include/aidge/operator/Sigmoid.hpp | 11 ++-- include/aidge/operator/Slice.hpp | 2 +- include/aidge/operator/Softmax.hpp | 11 +--- include/aidge/operator/Sqrt.hpp | 14 ++--- include/aidge/operator/Sub.hpp | 16 ++---- include/aidge/operator/Tanh.hpp | 17 +++---- include/aidge/operator/Transpose.hpp | 2 +- src/operator/Add.cpp | 17 +++++++ 
src/operator/Cast.cpp | 5 ++ src/operator/Concat.cpp | 45 ++++++++++++++++- src/operator/Div.cpp | 9 +++- src/operator/Erf.cpp | 13 ++++- src/operator/FC.cpp | 48 +++++++++++++++++- src/operator/Gather.cpp | 16 ++++-- src/operator/GenericOperator.cpp | 41 +++++++++++++-- src/operator/MatMul.cpp | 6 +++ src/operator/MetaOperator.cpp | 9 +++- src/operator/Mul.cpp | 11 +++- src/operator/OperatorTensor.cpp | 43 +++++++++++++++- src/operator/Pop.cpp | 15 +++++- src/operator/Pow.cpp | 5 ++ src/operator/Producer.cpp | 55 +++++++++++++++++++- src/operator/ReLU.cpp | 13 ++++- src/operator/Reshape.cpp | 11 +++- src/operator/Scaling.cpp | 14 ++++- src/operator/Sigmoid.cpp | 14 ++++- src/operator/Softmax.cpp | 14 ++++- src/operator/Sqrt.cpp | 14 ++++- src/operator/Sub.cpp | 14 +++-- src/operator/Tanh.cpp | 14 ++++- 58 files changed, 645 insertions(+), 440 deletions(-) diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp index 3115cedca..93cfb4451 100644 --- a/include/aidge/operator/Add.hpp +++ b/include/aidge/operator/Add.hpp @@ -12,15 +12,11 @@ #ifndef AIDGE_CORE_OPERATOR_ADD_H_ #define AIDGE_CORE_OPERATOR_ADD_H_ -#include <numeric> -#include <vector> -#include <cmath> #include <memory> +#include <string> #include <vector> -#include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" @@ -44,15 +40,7 @@ public: * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). * @param op Operator to copy. */ - Add_Op(const Add_Op& op) - : OperatorTensor(op) - { - if (op.mImpl){ - SET_IMPL_MACRO(Add_Op, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ - mImpl = nullptr; - } - } + Add_Op(const Add_Op& op); /** * @brief Clone the operator using its copy-constructor. 
@@ -74,10 +62,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Add_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName() { return {"data_input_0", "data_input_n"}; diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp index e427aac72..031046500 100644 --- a/include/aidge/operator/AvgPooling.hpp +++ b/include/aidge/operator/AvgPooling.hpp @@ -13,14 +13,18 @@ #define AIDGE_CORE_OPERATOR_AVGPOOLING_H_ #include <array> -#include <numeric> +#include <cmath> // std::floor +#include <cstddef> // std::size_t +#include <string> +#include <utility> // std::pair #include <vector> -#include <cmath> #include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" +#include "aidge/utils/ArrayHelpers.hpp" +#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" @@ -60,9 +64,9 @@ public: : OperatorTensor(op), Attributes_(op) { - if (op.mImpl){ - SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + if (op.mImpl) { + SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend()); + } else { mImpl = nullptr; } } @@ -101,8 +105,7 @@ public: std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, - const IOIndex_t outputIdx = 0) const override final - { + const IOIndex_t outputIdx = 0) const override final { if (outputIdx != 0) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor."); } @@ -153,8 +156,8 @@ public: } }; -template <DimIdx_t 
DIM> -const std::string AvgPooling_Op<DIM>::Type = "AvgPooling"; +template <Aidge::DimIdx_t DIM> +const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling"; template <std::array<DimSize_t, 1>::size_type DIM> inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims, diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp index 83ad2dbbb..51673dd3c 100644 --- a/include/aidge/operator/BatchNorm.hpp +++ b/include/aidge/operator/BatchNorm.hpp @@ -55,7 +55,7 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend()); }else{ mImpl = nullptr; } diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp index 7cc398567..bbc776a11 100644 --- a/include/aidge/operator/Cast.hpp +++ b/include/aidge/operator/Cast.hpp @@ -39,7 +39,11 @@ public: Cast_Op(const Cast_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? 
Registrar<Cast_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl) { + SET_IMPL_MACRO(Cast_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -50,12 +54,7 @@ public: return std::make_shared<Cast_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - if (Registrar<Cast_Op>::exists({name})) { - mImpl = Registrar<Cast_Op>::create({name})(*this); - } - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; void forward() override; diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp index 450c40bd2..611ff6bd5 100644 --- a/include/aidge/operator/Concat.hpp +++ b/include/aidge/operator/Concat.hpp @@ -12,16 +12,16 @@ #ifndef AIDGE_CORE_OPERATOR_CONCAT_H_ #define AIDGE_CORE_OPERATOR_CONCAT_H_ -#include <numeric> -#include <vector> -#include <cmath> #include <memory> +#include <stdexcept> +#include <string> #include <vector> #include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Types.h" @@ -56,7 +56,7 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(Concat_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(Concat_Op, *this, op.backend()); }else{ mImpl = nullptr; } @@ -70,51 +70,9 @@ public: return std::make_shared<Concat_Op>(*this); } - // Data operator[](const char* inputName) override final { - // std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] : - // (strcmp(inputName, "weight") ? mInputs[1] : - // (strcmp(inputName, "bias") ? 
mInputs[2] : - // nullptr)); - // assert((in!=nullptr) && "No such parameter"); - // return *in; - // } - + void computeOutputDims() override final; - void computeOutputDims() override final { - // Every input is non-empty with the same number of dimensions - bool associated = (getInput(0) != nullptr); - associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input - auto outputDims = getInput(0)->dims(); - const auto firstInputNbDims = getInput(0) -> nbDims(); - for (IOIndex_t i = 1; i < nbInputs(); ++i) { - if (!getInput(i)) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i); - } - - if (getInput(i)->nbDims() == firstInputNbDims) { - for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) { - if (dim == getAttr<ConcatAttr::Axis>()) { - outputDims[dim] += getInput(i)->dims()[dim]; - } - else { - associated &= (getInput(i)->dims()[dim] == outputDims[dim]); - } - } - } - else { - associated = false; - break; - } - } - if (associated) { - getOutput(0)->resize(outputDims); - } - } - - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Concat_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input_0", "data_input_n"}; diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp index 517af5b05..c93a09810 100644 --- a/include/aidge/operator/Conv.hpp +++ b/include/aidge/operator/Conv.hpp @@ -13,17 +13,20 @@ #define AIDGE_CORE_OPERATOR_CONV_H_ #include <array> -#include <cmath> -#include <cstddef> -#include <numeric> +#include <cmath> // std::floor +#include <cstddef> // std::size_t +#include <string> +#include <utility> // std::pair #include <vector> #include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include 
"aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" -#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/ArrayHelpers.hpp" +#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO +#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -77,9 +80,9 @@ public: : OperatorTensor(op), Attributes_(op) { - if (op.mImpl){ - SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + if (op.mImpl) { + SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend()); + } else { mImpl = nullptr; } } @@ -134,8 +137,10 @@ public: } } - -std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override { + std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> + computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, + const std::vector<DimSize_t>& outputDims, + const IOIndex_t outputIdx = 0) const override { if (outputIdx != 0) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor."); } @@ -191,6 +196,7 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> co AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); } + void setBackend(const std::string &name, DeviceIdx_t device = 0) override { SET_IMPL_MACRO(Conv_Op<DIM>, *this, name); mOutputs[0]->setBackend(name, device); diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp index 035bd84b6..559c0fc7a 100644 --- a/include/aidge/operator/ConvDepthWise.hpp +++ b/include/aidge/operator/ConvDepthWise.hpp @@ -13,14 +13,17 @@ #define AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ #include <array> -#include <cmath> -#include <numeric> +#include <cmath> // 
std::floor +#include <cstddef> // std::size_t +#include <string> +#include <utility> // std::pair #include <vector> #include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" +#include "aidge/utils/ArrayHelpers.hpp" #include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" @@ -72,7 +75,7 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend()); }else{ mImpl = nullptr; } diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp index be654a3c0..49410db04 100644 --- a/include/aidge/operator/Div.hpp +++ b/include/aidge/operator/Div.hpp @@ -12,14 +12,13 @@ #ifndef AIDGE_CORE_OPERATOR_DIV_H_ #define AIDGE_CORE_OPERATOR_DIV_H_ -#include <cassert> #include <memory> +#include <string> #include <vector> #include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/utils/Types.h" @@ -40,9 +39,9 @@ public: Div_Op(const Div_Op& op) : OperatorTensor(op) { - if (op.mImpl){ - SET_IMPL_MACRO(Div_Op, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + if (op.mImpl) { + SET_IMPL_MACRO(Div_Op, *this, op.backend()); + } else { mImpl = nullptr; } } @@ -57,11 +56,7 @@ public: void computeOutputDims() override final; - - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Div_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input_1", "data_input_2"}; diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp index 5a92b5dc4..5ec10522e 
100644 --- a/include/aidge/operator/Erf.hpp +++ b/include/aidge/operator/Erf.hpp @@ -12,16 +12,14 @@ #ifndef AIDGE_CORE_OPERATOR_ERF_H_ #define AIDGE_CORE_OPERATOR_ERF_H_ -#include <cassert> #include <memory> +#include <string> #include <vector> -#include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -40,9 +38,9 @@ public: Erf_Op(const Erf_Op& op) : OperatorTensor(op) { - if (op.mImpl){ - SET_IMPL_MACRO(Erf_Op, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + if (op.mImpl) { + SET_IMPL_MACRO(Erf_Op, *this, op.backend()); + } else { mImpl = nullptr; } } @@ -55,10 +53,7 @@ public: return std::make_shared<Erf_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Erf_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp index c111e38b0..39b28c125 100644 --- a/include/aidge/operator/FC.hpp +++ b/include/aidge/operator/FC.hpp @@ -13,13 +13,10 @@ #define AIDGE_CORE_OPERATOR_FC_H_ #include <array> -#include <cmath> -#include <numeric> #include <memory> #include <vector> #include "aidge/utils/Types.h" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" @@ -58,7 +55,7 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(FC_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(FC_Op, *this, op.backend()); }else{ mImpl = nullptr; } @@ -68,46 +65,15 @@ public: * @brief Clone the operator using its 
copy-constructor. * @see Operator::FC_Op */ - std::shared_ptr<Operator> clone() const override { + std::shared_ptr<Operator> clone() const override final { return std::make_shared<FC_Op>(*this); } - void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final { - assert(inputIdx < 3 && "operators supports only 3 inputs"); - assert(data->type() == Tensor::Type && "input data must be of Tensor type"); - // TODO: FIXME: check this, because data dims may not be initialized at this point... - //if (inputIdx == 2) { - // assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template getAttr<FCAttr::NoBias>()) == false ? static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>()) : 0)); - // assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1); - //} - mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data); - if (inputIdx == 0 && getInput(0)->nbDims() == 1) - mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()}); - } + void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final; - void computeOutputDims() override final { - bool associated = true; - for (IOIndex_t i = 0; i < nbInputs(); ++i) { - if (!getInput(i)) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i); - } - associated &= !(getInput(i)->empty()); - } - if (associated) { - // <batch, OutChannels> - mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()}); - } - } + void computeOutputDims() override final; - - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(FC_Op, *this, name); - mOutputs[0]->setBackend(name, device); - - // By default, automatically set backend for weight and bias inputs - getInput(1)->setBackend(name, device); - getInput(2)->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const 
std::vector<std::string> getInputsName(){ return {"data_input", "weight", "bias"}; diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp index 142f6582a..b7d18e644 100644 --- a/include/aidge/operator/Gather.hpp +++ b/include/aidge/operator/Gather.hpp @@ -12,16 +12,14 @@ #ifndef AIDGE_CORE_OPERATOR_GATHER_H_ #define AIDGE_CORE_OPERATOR_GATHER_H_ -#include <cassert> +#include <cstdint> // std::int64_t #include <memory> +#include <string> #include <vector> #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" -#include "aidge/operator/Producer.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Types.h" @@ -59,8 +57,8 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(Gather_Op, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + SET_IMPL_MACRO(Gather_Op, *this, op.backend()); + } else { mImpl = nullptr; } } @@ -75,10 +73,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Gather_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp index 20b0cdc4a..e7d60285b 100644 --- a/include/aidge/operator/GenericOperator.hpp +++ b/include/aidge/operator/GenericOperator.hpp @@ -15,8 +15,6 @@ #include <memory> #include <vector> #include <string> -#include <cassert> -#include <cstring> #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" @@ -38,8 +36,8 @@ private: public: GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut) : 
OperatorTensor(type, nbData, nbParam, nbOut) - { - mImpl = std::make_shared<OperatorImpl>(*this); + { + mImpl = std::make_shared<OperatorImpl>(*this, ""); } /** @@ -49,9 +47,11 @@ public: GenericOperator_Op(const GenericOperator_Op& op) : OperatorTensor(op) { - mImpl = std::make_shared<OperatorImpl>(*this); + mImpl = std::make_shared<OperatorImpl>(*this, op.backend()); } + ~GenericOperator_Op() = default; + /** * @brief Clone the operator using its copy-constructor. * @see Operator::GenericOperator_Op @@ -60,50 +60,20 @@ public: return std::make_shared<GenericOperator_Op>(*this); } +public: + void computeOutputDims() override final; + + bool outputDimsForwarded() const override final; + + void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); } + void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); } + // Helper functions that can be used with setComputeOutputDims(): static const ComputeDimsFunc Identity; static const ComputeDimsFunc InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs); - inline void setComputeOutputDims(ComputeDimsFunc func) { mComputeOutputDims = func; } - - - void computeOutputDims() override final { - if (mComputeOutputDims) { - std::vector<std::vector<size_t>> inputsDims(nbInputs(), std::vector<size_t>()); - for (std::size_t i = 0; i < nbInputs(); ++i) { - if (getInput(i)) { - inputsDims[i] = getInput(i)->dims(); - } - } - - const auto& outputsDims = mComputeOutputDims(inputsDims); - assert(outputsDims.size() == nbOutputs() && "The provided ComputeDimsFunc function returns the wrong number of outputs"); - for (std::size_t i = 0; i < nbOutputs(); ++i) { - mOutputs[i]->resize(outputsDims[i]); - } - } - else { - assert(false && "Cannot compute output dim of a GenericOperator"); - } - } - - bool outputDimsForwarded() const override final { - if (mComputeOutputDims) { - return !(mOutputs[0]->empty()); - } - 
else { - assert(false && "GenericOperator cannot forward dims"); - return false; - } - } - - - ~GenericOperator_Op() = default; - - void setBackend(const std::string & /*name*/, DeviceIdx_t /*device*/ = 0) override { fmt::print("setBackend: not available yet.\n"); } - void setDataType(const DataType& /*datatype*/) const override { fmt::print("setDataType: not available yet.\n"); } }; /** diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp index c2e6eaff7..27432bc5b 100644 --- a/include/aidge/operator/Identity.hpp +++ b/include/aidge/operator/Identity.hpp @@ -40,9 +40,9 @@ public: static const std::string Type; Identity_Op() - : OperatorTensor(Type, 1, 0, 1) + : OperatorTensor(Type, 1, 0, 1) { - mImpl = std::make_shared<OperatorImpl>(*this); + mImpl = std::make_shared<OperatorImpl>(*this, ""); } /** @@ -52,7 +52,7 @@ Identity_Op(const Identity_Op& op) : OperatorTensor(op) { - mImpl = std::make_shared<OperatorImpl>(*this); + mImpl = std::make_shared<OperatorImpl>(*this, op.backend()); } /** @@ -65,11 +65,16 @@ void computeOutputDims() override final {} // Do nothing + /** + * @brief Check if output dimensions have been computed. + * @note Since Identity has no output Tensor, this function checks if its + * only input's dimensions have been computed. + * + * @return true Input has dimensions. + * @return false Input has no dimensions or is a nullptr. + */ bool outputDimsForwarded() const override final { - if (mInputs[0]) - return !mInputs[0]->empty(); - else - return false; + return mInputs[0] ? 
!mInputs[0]->empty() : false; } diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp index c48b85b4a..83a7c30fc 100644 --- a/include/aidge/operator/LeakyReLU.hpp +++ b/include/aidge/operator/LeakyReLU.hpp @@ -55,8 +55,8 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(LeakyReLU_Op, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend()); + } else { mImpl = nullptr; } } diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp index 596aa6346..43bd8b165 100644 --- a/include/aidge/operator/MatMul.hpp +++ b/include/aidge/operator/MatMul.hpp @@ -17,7 +17,6 @@ #include <vector> #include "aidge/utils/Types.h" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Registrar.hpp" @@ -39,7 +38,11 @@ public: */ MatMul_Op(const MatMul_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? Registrar<MatMul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(MatMul_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -64,10 +67,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override final { - SET_IMPL_MACRO(MatMul_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName() { return {"data_input1", "data_input2"}; @@ -82,4 +82,4 @@ inline std::shared_ptr<Node> MatMul(const std::string& name = "") { } } // namespace Aidge -#endif /* AIDGE_CORE_OPERATOR__MATMUL_H_ */ +#endif /* AIDGE_CORE_OPERATOR_MATMUL_H_ */ diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp index 06ac30158..5b09aa02c 100644 --- a/include/aidge/operator/MaxPooling.hpp +++ 
b/include/aidge/operator/MaxPooling.hpp @@ -13,16 +13,20 @@ #define AIDGE_CORE_OPERATOR_MAXPOOLING_H_ #include <array> -#include <numeric> +#include <cmath> // std::ceil, std::floor +#include <cstddef> // std::size_t +#include <functional> +#include <memory> +#include <stdexcept> // std::runtime_error #include <vector> -#include <cmath> #include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" -#include "aidge/operator/Producer.hpp" -#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/ArrayHelpers.hpp" +#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/Registrar.hpp" +#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -64,9 +68,9 @@ public: : OperatorTensor(op), Attributes_(op) { - if (op.mImpl){ - SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + if (op.mImpl) { + SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend()); + } else { mImpl = nullptr; } } diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp index 8991ccb44..73433aaca 100644 --- a/include/aidge/operator/Memorize.hpp +++ b/include/aidge/operator/Memorize.hpp @@ -54,7 +54,7 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? Registrar<Memorize_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + mImpl = op.mImpl ? 
Registrar<Memorize_Op>::create(op.backend())(*this) : nullptr; mOutputs[1] = mOutputs[0]; } diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp index 7f36eca2c..4d719b6cb 100644 --- a/include/aidge/operator/MetaOperator.hpp +++ b/include/aidge/operator/MetaOperator.hpp @@ -12,10 +12,18 @@ #ifndef AIDGE_CORE_OPERATOR_METAOPERATOR_H_ #define AIDGE_CORE_OPERATOR_METAOPERATOR_H_ -#include "aidge/operator/OperatorTensor.hpp" +#include <array> +#include <memory> +#include <string> + +#include "aidge/data/Data.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/graph/GraphView.hpp" #include "aidge/graph/OpArgs.hpp" +#include "aidge/operator/OperatorTensor.hpp" #include "aidge/scheduler/Scheduler.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" namespace Aidge { class MetaOperator_Op : public OperatorTensor, @@ -28,7 +36,7 @@ public: std::weak_ptr<Node> mUpperNode; public: - MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph); + MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph); /** * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp index 753040788..cc9fba594 100644 --- a/include/aidge/operator/Mul.hpp +++ b/include/aidge/operator/Mul.hpp @@ -19,7 +19,6 @@ #include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/utils/Types.h" @@ -43,7 +42,11 @@ public: Mul_Op(const Mul_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? 
Registrar<Mul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl) { + SET_IMPL_MACRO(Mul_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -56,10 +59,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Mul_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {"data_input_1", "data_input_2"}; diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp index 396c60e46..17c8204c1 100644 --- a/include/aidge/operator/Operator.hpp +++ b/include/aidge/operator/Operator.hpp @@ -81,7 +81,7 @@ public: virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0; /** - * @brief Set the specified input by performing a deep copy of the given data. + * @brief Set the specified input value by performing a deep copy of the given data. * The pointer itself is not changed, thus keeping the current connections. * @param inputIdx Index of the input to set. * @param data Data to copy. @@ -90,7 +90,7 @@ public: virtual void setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) = 0; virtual std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const = 0; /** - * @brief Set the specified output by performing a deep copy of the given data. + * @brief Set the specified output value by performing a deep copy of the given data. * The pointer itself is not changed, thus keeping the current connections. * @param inputIdx Index of the input to set. */ @@ -110,6 +110,9 @@ public: /////////////////////////////////////////////////////// // IMPLEMENTATION /////////////////////////////////////////////////////// + std::string backend() const noexcept { + return mImpl ? 
mImpl->backend() : ""; + } virtual void setBackend(const std::string& name, DeviceIdx_t device = 0) = 0; virtual void setDataType(const DataType& dataType) const = 0; diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp index 504a41648..adf45c2d8 100644 --- a/include/aidge/operator/OperatorTensor.hpp +++ b/include/aidge/operator/OperatorTensor.hpp @@ -17,12 +17,12 @@ #include <vector> #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/operator/Operator.hpp" #include "aidge/utils/Types.h" namespace Aidge { +class Tensor; class OperatorTensor : public Operator { /* TODO: Add an attribute specifying the type of Data used by the Operator. * The same way ``Type`` attribute specifies the type of Operator. Hence this @@ -41,26 +41,9 @@ public: OperatorTensor() = delete; OperatorTensor(const std::string& type, const IOIndex_t nbData, const IOIndex_t nbParam, - const IOIndex_t nbOut) - : Operator(type, nbData, nbParam, nbOut, OperatorType::Tensor), - mInputs(std::vector<std::shared_ptr<Tensor>>(nbData + nbParam, nullptr)), - mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) { - for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) { - mOutputs[i] = std::make_shared<Tensor>(); - mOutputs[i]->setDataType(DataType::Float32); - } - } + const IOIndex_t nbOut); - OperatorTensor(const OperatorTensor& other) - : Operator(other), - mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)), - mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) { - for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) { - mOutputs[i] = std::make_shared<Tensor>(); - // mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i))); - // datatype already copied - } - } + OperatorTensor(const OperatorTensor& other); ~OperatorTensor(); @@ -76,17 +59,13 @@ public: void setInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final; void 
setInput(const IOIndex_t inputIdx, std::shared_ptr<Data>&& data) override final; const std::shared_ptr<Tensor>& getInput(const IOIndex_t inputIdx) const; - inline std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final { - return std::static_pointer_cast<Data>(getInput(inputIdx)); - } + std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final; // output management void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override; void setOutput(const IOIndex_t outputIdx, std::shared_ptr<Data>&& data) override; virtual const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const; - inline std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final { - return std::static_pointer_cast<Data>(getOutput(outputIdx)); - } + std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final; /////////////////////////////////////////////////// /////////////////////////////////////////////////// diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp index cb4ba871a..9109ccaeb 100644 --- a/include/aidge/operator/Pop.hpp +++ b/include/aidge/operator/Pop.hpp @@ -12,17 +12,16 @@ #ifndef AIDGE_CORE_OPERATOR_POP_H_ #define AIDGE_CORE_OPERATOR_POP_H_ -#include <cassert> #include <memory> +#include <string> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" -#include "aidge/utils/Types.h" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" namespace Aidge { enum class PopAttr { ForwardStep }; @@ -40,9 +39,7 @@ public: Pop_Op() : OperatorTensor(Type, 1, 0, 1), Attributes_(attr<PopAttr::ForwardStep>(0)) - { - - } + {} /** * @brief Copy-constructor. 
Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). @@ -52,7 +49,11 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? Registrar<Pop_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Pop_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -63,10 +64,7 @@ public: return std::make_shared<Pop_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Pop_Op>::create({name})(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; void computeOutputDims() override final; void updateConsummerProducer() override; diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp index ec4eebf9d..aadbf92c4 100644 --- a/include/aidge/operator/Pow.hpp +++ b/include/aidge/operator/Pow.hpp @@ -19,8 +19,6 @@ #include "aidge/utils/Registrar.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" #include "aidge/utils/Types.h" @@ -41,7 +39,7 @@ public: : OperatorTensor(op) { if (op.mImpl){ - SET_IMPL_MACRO(Pow_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(Pow_Op, *this, op.backend()); }else{ mImpl = nullptr; } @@ -58,15 +56,12 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Pow_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; - static const std::vector<std::string> getInputsName(){ + static const std::vector<std::string> getInputsName() { return {"data_input_1", "data_input_2"}; } - static const std::vector<std::string> 
getOutputsName(){ + static const std::vector<std::string> getOutputsName() { return {"data_output"}; } }; diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp index c9b1f6e4a..66c66d90b 100644 --- a/include/aidge/operator/Producer.hpp +++ b/include/aidge/operator/Producer.hpp @@ -12,7 +12,9 @@ #ifndef AIDGE_CORE_OPERATOR_PRODUCER_H_ #define AIDGE_CORE_OPERATOR_PRODUCER_H_ +#include <cstddef> #include <array> +#include <memory> #include <vector> #include "aidge/utils/Types.h" @@ -42,41 +44,40 @@ public: Producer_Op(const std::array<DimSize_t, DIM>& dims, bool constant = false) : OperatorTensor(Type, 0, 0, 1), - Attributes_(attr<ProdAttr::Constant>(constant)) + Attributes_(attr<ProdAttr::Constant>(constant)) { mOutputs[0]->resize(dims); - mImpl = std::make_shared<OperatorImpl>(*this); + // mImpl = std::make_shared<OperatorImpl>(*this, ""); + mImpl = nullptr; } - Producer_Op(const std::shared_ptr<Tensor> tensor, bool constant = false) - : OperatorTensor(Type, 0, 0, 1), - Attributes_(attr<ProdAttr::Constant>(constant)) - { - mOutputs[0] = tensor; // copy the pointer of the Tensor - mImpl = std::make_shared<OperatorImpl>(*this); - } + /** + * @brief Construct a new Producer_Op object from a Tensor. + * + * @param tensor Tensor to set in the Prducer. + * @param constant Whether the Producer should be considered constant. + */ + Producer_Op(const std::shared_ptr<Tensor> tensor, bool constant = false); /** - * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). + * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), + * but not its input tensors (the new operator has no input associated). * @param op OperatorTensor to copy. 
*/ - Producer_Op(const Producer_Op& op) - : OperatorTensor(op), - Attributes_(op) - { - for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) { - mOutputs[i] = std::make_shared<Tensor>(*(op.getOutput(i))); - } - if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){ - SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend()); - }else{ - mImpl = std::make_shared<OperatorImpl>(*this); - } - } + Producer_Op(const Producer_Op& op); + +public: + /** + * @brief Conversion operator from Producer to Tensor. + * + * @return std::shared_ptr<Tensor> + */ + operator std::shared_ptr<Tensor>() const { return mOutputs[0]; } +public: /** * @brief Clone the operator using its copy-constructor. - * @see Operator::Producer_Op + * @see Operator::Producer_Op(const Producer_Op&) */ std::shared_ptr<Operator> clone() const override { return std::make_shared<Producer_Op>(*this); @@ -86,17 +87,14 @@ public: AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input."); } - void computeOutputDims() override final {} + void computeOutputDims() noexcept override final {} - bool outputDimsForwarded() const override final {return true;} + inline bool outputDimsForwarded() const noexcept override final { return true; } inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Producer_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override; static const std::vector<std::string> getInputsName(){ return {}; @@ -105,7 +103,6 @@ public: return {"data_output"}; } -public: void forward() override final { fmt::print("Basic Producer forward() function.\n"); } diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp index 5b8f5c4b8..963de31c4 100644 --- a/include/aidge/operator/ReLU.hpp 
+++ b/include/aidge/operator/ReLU.hpp @@ -16,11 +16,11 @@ #include <memory> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -40,7 +40,7 @@ public: : OperatorTensor(op) { if (op.mImpl){ - SET_IMPL_MACRO(ReLU_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(ReLU_Op, *this, op.backend()); }else{ mImpl = nullptr; } @@ -55,10 +55,7 @@ public: } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(ReLU_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp index 09f1d5835..609f5be5f 100644 --- a/include/aidge/operator/ReduceMean.hpp +++ b/include/aidge/operator/ReduceMean.hpp @@ -12,17 +12,18 @@ #ifndef AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ #define AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ -#include <algorithm> // std::for_each +#include <algorithm> // std::for_each, std::sort #include <array> -#include <cmath> #include <cstdint> // std::int32_t -#include <numeric> +#include <memory> +#include <stdexcept> // std::runtime_error #include <vector> #include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" +#include "aidge/utils/ErrorHandling.hpp" #include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" @@ -58,7 +59,7 @@ class ReduceMean_Op : public OperatorTensor, Attributes_(op) { if (op.mImpl){ 
- SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, op.backend()); }else{ mImpl = nullptr; } @@ -95,10 +96,8 @@ class ReduceMean_Op : public OperatorTensor, outDims.erase(outDims.begin() + static_cast<std::size_t>(*it)); } - if(outDims.size()>0) - mOutputs[0]->resize(outDims); - else - mOutputs[0]->resize({1}); + mOutputs[0]->resize((outDims.size()>0) ? outDims : {1}); + } } diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp index 8914bbc9a..060029bb8 100644 --- a/include/aidge/operator/Reshape.hpp +++ b/include/aidge/operator/Reshape.hpp @@ -12,7 +12,6 @@ #ifndef AIDGE_CORE_OPERATOR_RESHAPE_H_ #define AIDGE_CORE_OPERATOR_RESHAPE_H_ -#include <cassert> #include <memory> #include <vector> @@ -54,8 +53,8 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(Reshape_Op, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + SET_IMPL_MACRO(Reshape_Op, *this, op.backend()); + } else { mImpl = nullptr; } } @@ -70,10 +69,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Reshape_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp index 29ce0527a..8f54ab217 100644 --- a/include/aidge/operator/Scaling.hpp +++ b/include/aidge/operator/Scaling.hpp @@ -9,18 +9,17 @@ * ********************************************************************************/ -#ifndef __AIDGE_CORE_OPERATOR_Scaling_H__ -#define __AIDGE_CORE_OPERATOR_Scaling_H__ +#ifndef AIDGE_CORE_OPERATOR_SCALING_H_ +#define AIDGE_CORE_OPERATOR_SCALING_H_ #include <vector> #include <memory> -#include "aidge/utils/StaticAttributes.hpp" -#include 
"aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -56,7 +55,7 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(Scaling_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(Scaling_Op, *this, op.backend()); } else { mImpl = nullptr; } @@ -70,10 +69,7 @@ public: return std::make_shared<Scaling_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Scaling_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName() { return {"data_input"}; @@ -99,4 +95,4 @@ const char* const EnumStrings<Aidge::ScalingAttr>::data[] = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"}; } -#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */ +#endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */ diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp index ab97bf321..bea9fc45e 100644 --- a/include/aidge/operator/Sigmoid.hpp +++ b/include/aidge/operator/Sigmoid.hpp @@ -39,7 +39,11 @@ public: Sigmoid_Op(const Sigmoid_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? 
Registrar<Sigmoid_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Sigmoid_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -51,10 +55,7 @@ public: } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Sigmoid_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp index 363c3c2b4..f68aa17f4 100644 --- a/include/aidge/operator/Slice.hpp +++ b/include/aidge/operator/Slice.hpp @@ -56,7 +56,7 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(Slice_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(Slice_Op, *this, op.backend()); }else{ mImpl = nullptr; } diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp index 943f69a58..d48dbc2b6 100644 --- a/include/aidge/operator/Softmax.hpp +++ b/include/aidge/operator/Softmax.hpp @@ -12,14 +12,10 @@ #ifndef AIDGE_CORE_OPERATOR_SOFTMAX_H_ #define AIDGE_CORE_OPERATOR_SOFTMAX_H_ -#include <cassert> #include <memory> #include <vector> - #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/operator/Producer.hpp" @@ -56,7 +52,7 @@ public: Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(Softmax_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(Softmax_Op, *this, op.backend()); }else{ mImpl = nullptr; } @@ -70,10 +66,7 @@ public: return std::make_shared<Softmax_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Softmax_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void 
setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp index dd3fa541b..f5ffa4311 100644 --- a/include/aidge/operator/Sqrt.hpp +++ b/include/aidge/operator/Sqrt.hpp @@ -12,16 +12,13 @@ #ifndef AIDGE_CORE_OPERATOR_SQRT_H_ #define AIDGE_CORE_OPERATOR_SQRT_H_ -#include <cassert> #include <memory> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -46,7 +43,7 @@ public: : OperatorTensor(op) { if (op.mImpl){ - SET_IMPL_MACRO(Sqrt_Op, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(Sqrt_Op, *this, op.backend()); }else{ mImpl = nullptr; } @@ -60,10 +57,7 @@ public: return std::make_shared<Sqrt_Op>(*this); } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Sqrt_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp index 5683a9be5..fbcebcc9f 100644 --- a/include/aidge/operator/Sub.hpp +++ b/include/aidge/operator/Sub.hpp @@ -12,16 +12,13 @@ #ifndef AIDGE_CORE_OPERATOR_SUB_H_ #define AIDGE_CORE_OPERATOR_SUB_H_ -#include <cassert> #include <memory> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" -#include "aidge/data/Data.hpp" #include 
"aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -46,8 +43,8 @@ public: : OperatorTensor(op) { if (op.mImpl){ - SET_IMPL_MACRO(Sub_Op, *this, op.mOutputs[0]->getImpl()->backend()); - }else{ + SET_IMPL_MACRO(Sub_Op, *this, op.backend()); + } else { mImpl = nullptr; } } @@ -63,10 +60,7 @@ public: void computeOutputDims() override final; - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - SET_IMPL_MACRO(Sub_Op, *this, name); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input_1", "data_input_2"}; diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp index ce0dc12a0..3fd5377d3 100644 --- a/include/aidge/operator/Tanh.hpp +++ b/include/aidge/operator/Tanh.hpp @@ -12,15 +12,13 @@ #ifndef AIDGE_CORE_OPERATOR_TANH_H_ #define AIDGE_CORE_OPERATOR_TANH_H_ -#include <cassert> #include <memory> #include <vector> -#include "aidge/utils/Registrar.hpp" -#include "aidge/operator/OperatorTensor.hpp" #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/data/Tensor.hpp" #include "aidge/graph/Node.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" namespace Aidge { @@ -39,7 +37,11 @@ public: Tanh_Op(const Tanh_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? 
Registrar<Tanh_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; + if (op.mImpl){ + SET_IMPL_MACRO(Tanh_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } } /** @@ -51,10 +53,7 @@ public: } - void setBackend(const std::string& name, DeviceIdx_t device = 0) override { - mImpl = Registrar<Tanh_Op>::create(name)(*this); - mOutputs[0]->setBackend(name, device); - } + void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; static const std::vector<std::string> getInputsName(){ return {"data_input"}; diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp index b040fc907..1beb5781b 100644 --- a/include/aidge/operator/Transpose.hpp +++ b/include/aidge/operator/Transpose.hpp @@ -57,7 +57,7 @@ class Transpose_Op : public OperatorTensor, Attributes_(op) { if (op.mImpl){ - SET_IMPL_MACRO(Transpose_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend()); + SET_IMPL_MACRO(Transpose_Op<DIM>, *this, op.backend()); }else{ mImpl = nullptr; } diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp index a54302d06..85bc4b7ae 100644 --- a/src/operator/Add.cpp +++ b/src/operator/Add.cpp @@ -14,12 +14,24 @@ #include <string> #include <vector> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Add.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" const std::string Aidge::Add_Op::Type = "Add"; +Aidge::Add_Op::Add_Op(const Add_Op& op) + : OperatorTensor(op) +{ + if (op.mImpl) { + SET_IMPL_MACRO(Add_Op, *this, op.backend()); + } else { + mImpl = nullptr; + } +} + void Aidge::Add_Op::computeOutputDims() { // check inputs have been associated bool associated = (nbInputs() > 0); // do not compute anything if no input @@ -59,3 +71,8 @@ void Aidge::Add_Op::computeOutputDims() { mOutputs[0]->resize(outDims); } } + +void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) { + SET_IMPL_MACRO(Add_Op, *this, name); + 
mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp index f09d8eb83..7dfb4d3bf 100644 --- a/src/operator/Cast.cpp +++ b/src/operator/Cast.cpp @@ -24,3 +24,8 @@ void Aidge::Cast_Op::forward() { runHooks(); } + +void Aidge::Cast_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Cast_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp index eafcd1264..7df5b6dbf 100644 --- a/src/operator/Concat.cpp +++ b/src/operator/Concat.cpp @@ -9,8 +9,49 @@ * ********************************************************************************/ +#include "aidge/operator/Concat.hpp" + #include <string> +#include <vector> -#include "aidge/operator/Concat.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Concat_Op::Type = "Concat"; + +void Aidge::Concat_Op::computeOutputDims() { + // Every input is non-empty with the same number of dimensions + bool associated = (getInput(0) != nullptr); + associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input + auto outputDims = getInput(0)->dims(); + const auto firstInputNbDims = getInput(0) -> nbDims(); + for (IOIndex_t i = 1; i < nbInputs(); ++i) { + if (!getInput(i)) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i); + } + + if (getInput(i)->nbDims() == firstInputNbDims) { + for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) { + if (dim == getAttr<ConcatAttr::Axis>()) { + outputDims[dim] += getInput(i)->dims()[dim]; + } + else { + associated &= (getInput(i)->dims()[dim] == outputDims[dim]); + } + } + } + else { + associated = false; + break; + } + } + if (associated) { + getOutput(0)->resize(outputDims); + } +} -const std::string 
Aidge::Concat_Op::Type = "Concat"; \ No newline at end of file +void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) { + SET_IMPL_MACRO(Concat_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp index 6b55338f4..5ffe5f08d 100644 --- a/src/operator/Div.cpp +++ b/src/operator/Div.cpp @@ -14,6 +14,7 @@ #include <string> #include <vector> +#include "aidge/data/Tensor.hpp" #include "aidge/backend/OperatorImpl.hpp" #include "aidge/operator/Div.hpp" #include "aidge/utils/Types.h" @@ -50,4 +51,10 @@ void Aidge::Div_Op::computeOutputDims() { } mOutputs[0]->resize(outDims); } -} \ No newline at end of file +} + + +void Aidge::Div_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Div_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/Erf.cpp b/src/operator/Erf.cpp index 387af4edf..81c87f10b 100644 --- a/src/operator/Erf.cpp +++ b/src/operator/Erf.cpp @@ -9,8 +9,17 @@ * ********************************************************************************/ +#include "aidge/operator/Erf.hpp" + #include <string> -#include "aidge/operator/Erf.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Erf_Op::Type = "Erf"; -const std::string Aidge::Erf_Op::Type = "Erf"; \ No newline at end of file +void Aidge::Erf_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Erf_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp index 32114f5bf..9865d64f6 100644 --- a/src/operator/FC.cpp +++ b/src/operator/FC.cpp @@ -9,8 +9,52 @@ * ********************************************************************************/ +#include "aidge/operator/FC.hpp" + +#include <memory> #include <string> +#include <vector> -#include "aidge/operator/FC.hpp" +#include 
"aidge/data/Data.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::FC_Op::Type = "FC"; + +void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) { + AIDGE_ASSERT(inputIdx < 3, "Operators {} supports only {} inputs", type(), nbInputs()); + AIDGE_ASSERT(data->type() == Tensor::Type, "input data must be of Tensor type"); + // TODO: FIXME: check this, because data dims may not be initialized at this point... + //if (inputIdx == 2) { + // assert(std::dynamic_pointer_cast<Tensor>(data)->size() == ((this->template getAttr<FCAttr::NoBias>()) == false ? static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>()) : 0)); + // assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1); + //} + mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data); + if (inputIdx == 0 && getInput(0)->nbDims() == 1) + mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()}); +} + +void Aidge::FC_Op::computeOutputDims() { + bool associated = true; + for (IOIndex_t i = 0; i < nbInputs(); ++i) { + if (!getInput(i)) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i); + } + associated &= !(getInput(i)->empty()); + } + if (associated) { + // <batch, OutChannels> + mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()}); + } +} + +void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(FC_Op, *this, name); + mOutputs[0]->setBackend(name, device); -const std::string Aidge::FC_Op::Type = "FC"; \ No newline at end of file + // By default, automatically set backend for weight and bias inputs + getInput(1)->setBackend(name, device); + getInput(2)->setBackend(name, device); +} diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp index 
b5f9d738a..259e65139 100644 --- a/src/operator/Gather.cpp +++ b/src/operator/Gather.cpp @@ -9,15 +9,18 @@ * ********************************************************************************/ -#include <cstddef> -#include <cstdint> +#include "aidge/operator/Gather.hpp" + +#include <cstddef> // std::size_t +#include <cstdint> // std::int64_t #include <string> #include <vector> -#include "aidge/operator/Gather.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" + const std::string Aidge::Gather_Op::Type = "Gather"; void Aidge::Gather_Op::computeOutputDims() { @@ -44,4 +47,9 @@ void Aidge::Gather_Op::computeOutputDims() { mOutputs[0]->resize(outDims); } -} \ No newline at end of file +} + +void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Gather_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp index 5556f4ff5..3eae49b69 100644 --- a/src/operator/GenericOperator.cpp +++ b/src/operator/GenericOperator.cpp @@ -9,13 +9,48 @@ * ********************************************************************************/ +#include "aidge/operator/GenericOperator.hpp" + +#include <cstddef> // std::size_t #include <vector> -#include "aidge/operator/GenericOperator.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Types.h" +#include "aidge/utils/ErrorHandling.hpp" const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Identity - = [](const std::vector<std::vector<size_t>>& inputsDims) { return inputsDims; }; + = [](const std::vector<std::vector<std::size_t>>& inputsDims) { return inputsDims; }; const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs) { - return [nbOutputs, inputIdx](const std::vector<std::vector<size_t>>& inputsDims) { return 
std::vector<std::vector<size_t>>(nbOutputs, inputsDims[inputIdx]); }; + return [nbOutputs, inputIdx](const std::vector<std::vector<std::size_t>>& inputsDims) { return std::vector<std::vector<std::size_t>>(nbOutputs, inputsDims[inputIdx]); }; } + +void Aidge::GenericOperator_Op::computeOutputDims() { + if (mComputeOutputDims) { + std::vector<std::vector<std::size_t>> inputsDims(nbInputs(), std::vector<std::size_t>()); + for (std::size_t i = 0; i < nbInputs(); ++i) { + if (getInput(i)) { + inputsDims[i] = getInput(i)->dims(); + } + } + + const auto& outputsDims = mComputeOutputDims(inputsDims); + AIDGE_ASSERT((outputsDims.size() == nbOutputs()), "The provided ComputeDimsFunc function returns the wrong number of outputs"); + for (std::size_t i = 0; i < nbOutputs(); ++i) { + mOutputs[i]->resize(outputsDims[i]); + } + } + else { + AIDGE_ASSERT(false, "Cannot compute output dim of a GenericOperator"); + } +} + +bool Aidge::GenericOperator_Op::outputDimsForwarded() const { + if (mComputeOutputDims) { + return !(mOutputs[0]->empty()); + } + else { + AIDGE_ASSERT(false, "GenericOperator cannot forward dims"); + return false; + } +} \ No newline at end of file diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp index f48c7ca81..568998753 100644 --- a/src/operator/MatMul.cpp +++ b/src/operator/MatMul.cpp @@ -13,6 +13,7 @@ #include <string> #include <vector> +#include "aidge/data/Tensor.hpp" #include "aidge/operator/MatMul.hpp" #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" @@ -70,3 +71,8 @@ void Aidge::MatMul_Op::computeOutputDims() { mOutputs[0]->resize(outDims); } } + +void Aidge::MatMul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(MatMul_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp index 883185021..45e755626 100644 --- a/src/operator/MetaOperator.cpp +++ b/src/operator/MetaOperator.cpp @@ -10,9 +10,16 @@ 
********************************************************************************/ #include "aidge/operator/MetaOperator.hpp" + +#include <cstddef> // std::size_t +#include <memory> +#include <string> + +#include "aidge/data/Tensor.hpp" +#include "aidge/graph/GraphView.hpp" #include "aidge/utils/ErrorHandling.hpp" -Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph) +Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shared_ptr<GraphView>& graph) : OperatorTensor(type, graph->dataInputs().size(), (graph->getOrderedInputs().size() - graph->dataInputs().size()), graph->getOrderedOutputs().size()), mGraph(graph) { diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp index d4a594e95..89bef9e0e 100644 --- a/src/operator/Mul.cpp +++ b/src/operator/Mul.cpp @@ -10,14 +10,16 @@ ********************************************************************************/ #include <cstddef> // std::size_t +#include <memory> #include <stdexcept> // std::runtime_error #include <string> #include <vector> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/operator/Mul.hpp" -#include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Types.h" const std::string Aidge::Mul_Op::Type = "Mul"; @@ -53,4 +55,9 @@ void Aidge::Mul_Op::computeOutputDims() { else if (!getInput(0)->empty() && !getInput(1)->empty()) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible input dimensions for Operator Mul: {} and {}", getInput(0)->dims(), getInput(1)->dims()); } -} \ No newline at end of file +} + +void Aidge::Mul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Mul_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp index c0ada2654..33f93d8e6 100644 --- a/src/operator/OperatorTensor.cpp +++ b/src/operator/OperatorTensor.cpp @@ -19,6 
+19,32 @@ #include "aidge/utils/ErrorHandling.hpp" +Aidge::OperatorTensor::OperatorTensor(const std::string& type, + const IOIndex_t nbData, + const IOIndex_t nbParam, + const IOIndex_t nbOut) +: Operator(type, nbData, nbParam, nbOut, OperatorType::Tensor), + mInputs(std::vector<std::shared_ptr<Tensor>>(nbData + nbParam, nullptr)), + mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) { + for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) { + mOutputs[i] = std::make_shared<Tensor>(); + mOutputs[i]->setDataType(DataType::Float32); + } +} + + +Aidge::OperatorTensor::OperatorTensor(const OperatorTensor& other) + : Operator(other), + mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)), + mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) { + for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) { + mOutputs[i] = std::make_shared<Tensor>(); + // mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i))); + // datatype already copied + } +} + + void Aidge::OperatorTensor::associateInput(const Aidge::IOIndex_t inputIdx, const std::shared_ptr<Aidge::Data>& data) { AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs()); AIDGE_ASSERT(data->type() == Tensor::Type, "Input data must be of Tensor type"); @@ -45,6 +71,9 @@ void Aidge::OperatorTensor::setInput(const Aidge::IOIndex_t inputIdx, std::share } } +std::shared_ptr<Aidge::Data> Aidge::OperatorTensor::getRawInput(const Aidge::IOIndex_t inputIdx) const { + return std::static_pointer_cast<Data>(getInput(inputIdx)); +} const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getInput(const Aidge::IOIndex_t inputIdx) const { AIDGE_ASSERT(inputIdx < nbInputs(), "{} Operator has {} inputs", type(), nbInputs()); return mInputs[inputIdx]; @@ -53,13 +82,23 @@ const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getInput(const Aidg void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, const 
std::shared_ptr<Aidge::Data>& data) { AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type()); AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs()); - *mOutputs[outputIdx] = *std::dynamic_pointer_cast<Tensor>(data); + const auto& data_tensor = std::dynamic_pointer_cast<Tensor>(data); + // if (mImpl) + // AIDGE_ASSERT(data_tensor->getImpl()->backend() == backend(), "Data parameter and Operator have different backends: {} and {}", data_tensor->getImpl()->backend(), backend()); + *mOutputs[outputIdx] = *data_tensor; } void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) { AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type()); AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs()); - *mOutputs[outputIdx] = std::move(*std::dynamic_pointer_cast<Tensor>(data)); + auto&& data_tensor =std::move(std::dynamic_pointer_cast<Tensor>(data)); + // if (mImpl) + // AIDGE_ASSERT(data_tensor->getImpl()->backend() == backend(), "Data parameter and Operator have different backends: {} and {}", data_tensor->getImpl()->backend(), backend()); + *mOutputs[outputIdx] = std::move(*data_tensor); +} + +std::shared_ptr<Aidge::Data> Aidge::OperatorTensor::getRawOutput(const Aidge::IOIndex_t outputIdx) const { + return std::static_pointer_cast<Data>(getOutput(outputIdx)); } const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aidge::IOIndex_t outputIdx) const { diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp index 3dd65eb4d..06999e301 100644 --- a/src/operator/Pop.cpp +++ b/src/operator/Pop.cpp @@ -9,9 +9,17 @@ * ********************************************************************************/ +#include "aidge/operator/Pop.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Pop.hpp" +#include "aidge/data/Tensor.hpp" +#include 
"aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" + const std::string Aidge::Pop_Op::Type = "Pop"; @@ -36,3 +44,8 @@ void Aidge::Pop_Op::forward() { Operator::forward(); ++this->template getAttr<PopAttr::ForwardStep>(); } + +void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Pop_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp index 5e29eae0c..6b16117d6 100644 --- a/src/operator/Pow.cpp +++ b/src/operator/Pow.cpp @@ -50,4 +50,9 @@ void Aidge::Pow_Op::computeOutputDims() { } mOutputs[0]->resize(outDims); } +} + +void Aidge::Pow_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Pow_Op, *this, name); + mOutputs[0]->setBackend(name, device); } \ No newline at end of file diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp index 7bccbe763..4a63b207c 100644 --- a/src/operator/Producer.cpp +++ b/src/operator/Producer.cpp @@ -9,8 +9,61 @@ * ********************************************************************************/ +#include "aidge/operator/Producer.hpp" + +#include <cstddef> +#include <array> +#include <memory> #include <string> -#include "aidge/operator/Producer.hpp" +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/StaticAttributes.hpp" +#include "aidge/utils/Types.h" + const std::string Aidge::Producer_Op::Type = "Producer"; + + +Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, bool constant) + : OperatorTensor(Type, 0, 0, 1), + Attributes_(attr<ProdAttr::Constant>(constant)) +{ + mOutputs[0] = tensor; // copy the pointer of the Tensor + mImpl = (tensor->hasImpl()) ? 
+ std::make_shared<OperatorImpl>(*this, tensor->getImpl()->backend()) : + nullptr; +} + +/** + * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), + * but not its input tensors (the new operator has no input associated). + * @param op OperatorTensor to copy. + */ +Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op) + : OperatorTensor(op), + Attributes_(op) +{ + mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0))); + if (mOutputs[0]->hasImpl()) { + if (Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){ + setImpl(Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this)); + } + else { + mImpl = std::make_shared<OperatorImpl>(*this, mOutputs[0]->getImpl()->backend()); + } + } else { + mImpl = nullptr; + } +} + +void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + if (Registrar<Producer_Op>::exists(name)) { + setImpl(Registrar<Producer_Op>::create(name)(*this)); + } else { + mImpl = std::make_shared<OperatorImpl>(*this, name); + } + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/ReLU.cpp b/src/operator/ReLU.cpp index 0f7874acf..7b945a7d6 100644 --- a/src/operator/ReLU.cpp +++ b/src/operator/ReLU.cpp @@ -9,8 +9,17 @@ * ********************************************************************************/ +#include "aidge/operator/ReLU.hpp" + +#include <memory> #include <string> -#include "aidge/operator/ReLU.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::ReLU_Op::Type = "ReLU"; -const std::string Aidge::ReLU_Op::Type = "ReLU"; \ No newline at end of file +void Aidge::ReLU_Op::setBackend(const std::string& name, DeviceIdx_t device) { + SET_IMPL_MACRO(ReLU_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp index 30b060cd2..79cfc0659 100644 --- 
a/src/operator/Reshape.cpp +++ b/src/operator/Reshape.cpp @@ -9,14 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Reshape.hpp" + #include <cstddef> // std::size_t #include <cstdint> // std::int64_t +#include <memory> #include <stdexcept> // std::runtime_error #include <string> #include <vector> -#include "aidge/operator/Reshape.hpp" +#include "aidge/data/Tensor.hpp" #include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Types.h" const std::string Aidge::Reshape_Op::Type = "Reshape"; @@ -55,4 +59,9 @@ void Aidge::Reshape_Op::computeOutputDims() { mOutputs[0]->resize(outDims); } +} + +void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Reshape_Op, *this, name); + mOutputs[0]->setBackend(name, device); } \ No newline at end of file diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp index 4c121e126..8b0d6f9db 100644 --- a/src/operator/Scaling.cpp +++ b/src/operator/Scaling.cpp @@ -9,8 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Scaling.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Scaling.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Scaling_Op::Type = "Scaling"; -const std::string Aidge::Scaling_Op::Type = "Scaling"; \ No newline at end of file +void Aidge::Scaling_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Scaling_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Sigmoid.cpp b/src/operator/Sigmoid.cpp index 48ed5f828..a6edcf823 100644 --- a/src/operator/Sigmoid.cpp +++ b/src/operator/Sigmoid.cpp @@ -9,8 +9,18 @@ * 
********************************************************************************/ +#include "aidge/operator/Sigmoid.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Sigmoid.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Sigmoid_Op::Type = "Sigmoid"; -const std::string Aidge::Sigmoid_Op::Type = "Sigmoid"; \ No newline at end of file +void Aidge::Sigmoid_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Sigmoid_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp index e88ff4bb4..612c61b0f 100644 --- a/src/operator/Softmax.cpp +++ b/src/operator/Softmax.cpp @@ -9,8 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Softmax.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Softmax.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Softmax_Op::Type = "Softmax"; -const std::string Aidge::Softmax_Op::Type = "Softmax"; \ No newline at end of file +void Aidge::Softmax_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Softmax_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Sqrt.cpp b/src/operator/Sqrt.cpp index dbcaba426..d8ac8b8b0 100644 --- a/src/operator/Sqrt.cpp +++ b/src/operator/Sqrt.cpp @@ -9,8 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Sqrt.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Sqrt.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string 
Aidge::Sqrt_Op::Type = "Sqrt"; -const std::string Aidge::Sqrt_Op::Type = "Sqrt"; \ No newline at end of file +void Aidge::Sqrt_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + mImpl = Registrar<Sqrt_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp index 9d933bf6c..0c12e6a1f 100644 --- a/src/operator/Sub.cpp +++ b/src/operator/Sub.cpp @@ -9,15 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Sub.hpp" + #include <cstddef> // std::size_t #include <stdexcept> // std::runtime_error #include <string> #include <vector> #include "aidge/backend/OperatorImpl.hpp" -#include "aidge/operator/Sub.hpp" -#include "aidge/utils/Types.h" +#include "aidge/data/Tensor.hpp" #include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" const std::string Aidge::Sub_Op::Type = "Sub"; @@ -50,4 +53,9 @@ void Aidge::Sub_Op::computeOutputDims() { } mOutputs[0]->resize(outDims); } -} \ No newline at end of file +} + +void Aidge::Sub_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + SET_IMPL_MACRO(Sub_Op, *this, name); + mOutputs[0]->setBackend(name, device); +} diff --git a/src/operator/Tanh.cpp b/src/operator/Tanh.cpp index de55a6d6c..c113ee6f2 100644 --- a/src/operator/Tanh.cpp +++ b/src/operator/Tanh.cpp @@ -9,8 +9,18 @@ * ********************************************************************************/ +#include "aidge/operator/Tanh.hpp" + +#include <memory> #include <string> -#include "aidge/operator/Tanh.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +const std::string Aidge::Tanh_Op::Type = "Tanh"; -const std::string Aidge::Tanh_Op::Type = "Tanh"; \ No newline at end of file +void Aidge::Tanh_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { + 
mImpl = Registrar<Tanh_Op>::create(name)(*this); + mOutputs[0]->setBackend(name, device); +} \ No newline at end of file -- GitLab