diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 9a9fced142ebc345c095c1eeca6b9a6c4270cf36..ab70bca2a039cc474c4c88ae7249e99d38e0126d 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -28,27 +28,31 @@ enum class AvgPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
 class AvgPooling_Op : public OperatorTensor,
-                public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
-                public StaticAttributes<AvgPoolingAttr,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>> {
+                public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    AvgPooling_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<AvgPoolingAttr,
                                             std::array<DimSize_t, DIM>,
                                             std::array<DimSize_t, DIM>>;
     template <AvgPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    AvgPooling_Op() = delete;
+
     constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
-                      attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {}
+          mAttributes(std::make_shared<Attributes_>(
+            attr<AvgPoolingAttr::StrideDims>(stride_dims),
+            attr<AvgPoolingAttr::KernelDims>(kernel_dims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -76,6 +80,10 @@ public:
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<AvgPoolingAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<AvgPoolingAttr::KernelDims>(); }
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
@@ -105,6 +113,10 @@ inline std::shared_ptr<Node> AvgPooling(
 } // namespace Aidge
 
+template class Aidge::StaticAttributes<Aidge::AvgPoolingAttr, std::array<Aidge::DimSize_t, 1>, std::array<Aidge::DimSize_t, 1>>;
+template class Aidge::StaticAttributes<Aidge::AvgPoolingAttr, std::array<Aidge::DimSize_t, 2>, std::array<Aidge::DimSize_t, 2>>;
+template class Aidge::StaticAttributes<Aidge::AvgPoolingAttr, std::array<Aidge::DimSize_t, 3>, std::array<Aidge::DimSize_t, 3>>;
+template class Aidge::StaticAttributes<Aidge::AvgPoolingAttr, std::array<Aidge::DimSize_t, 4>, std::array<Aidge::DimSize_t, 4>>;
 extern template class Aidge::AvgPooling_Op<1>;
 extern template class Aidge::AvgPooling_Op<2>;
 extern template class Aidge::AvgPooling_Op<3>;
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index aa53f8c43f0be2a0e094946d66fd263bc19e39f5..3b4f74ba2891e9fae8f7566c04d9b6a7ea92166b 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -28,21 +28,25 @@ enum class BatchNormAttr { Epsilon, Momentum };
 
 template <DimIdx_t DIM>
 class BatchNorm_Op : public OperatorTensor,
-                public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
-                public StaticAttributes<BatchNormAttr, float, float> {
+                public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    BatchNorm_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<BatchNormAttr, float, float>;
     template <BatchNormAttr e>
    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    BatchNorm_Op() = delete;
 
     constexpr BatchNorm_Op(float epsilon, float momentum)
         : OperatorTensor(Type, 1, 4, 1),
-          Attributes_(attr<BatchNormAttr::Epsilon>(epsilon),
-                      attr<BatchNormAttr::Momentum>(momentum)) {}
+          mAttributes(std::make_shared<Attributes_>(
+            attr<BatchNormAttr::Epsilon>(epsilon),
+            attr<BatchNormAttr::Momentum>(momentum))) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -72,6 +76,10 @@ public:
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& epsilon() const { return mAttributes->template getAttr<BatchNormAttr::Epsilon>(); }
+    inline float& momentum() const { return mAttributes->template getAttr<BatchNormAttr::Momentum>(); }
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input", "scale", "shift", "mean", "variance"};
     }
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index a9a4c9253f3af9f9cd82390256ec70d066017cc5..e86f092c6a8cc6b7ca913735f5af3717777e94ba 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -28,25 +28,32 @@ namespace Aidge {
 class Concat_OpImpl : public OperatorImpl {
 public:
-    Concat_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Concat_OpImpl(const Operator& op, const std::string& backend = "")
+        : OperatorImpl(op, backend)
+    {}
     void forward() override;
 };
 
 enum class ConcatAttr { Axis };
 
 class Concat_Op : public OperatorTensor,
-    public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)>,
-    public StaticAttributes<ConcatAttr, DimSize_t> {
+    public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)> {
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<ConcatAttr, DimSize_t>;
+private:
+    using Attributes_ = StaticAttributes<ConcatAttr, std::int32_t>;
     template <ConcatAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    Concat_Op(const IOIndex_t nbIn, const DimSize_t axis)
+public:
+    Concat_Op() = delete;
+
+    Concat_Op(const IOIndex_t nbIn, const std::int32_t axis)
         : OperatorTensor(Type, nbIn, 0, 1),
-          Attributes_(attr<ConcatAttr::Axis>(axis))
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ConcatAttr::Axis>(axis)))
     {
         if (nbIn == 0) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Add operator should have at least one input.");
@@ -60,7 +67,7 @@ public:
      */
     Concat_Op(const Concat_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Concat_Op, *this, op.backend());
@@ -82,6 +89,9 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int32_t& axis() const { return mAttributes->template getAttr<ConcatAttr::Axis>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input_0", "data_input_n"};
     }
@@ -90,7 +100,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const DimIdx_t axis = 0, const std::string& name = "") {
+inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Concat_Op>(nbIn, axis), name);
 }
 }
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index c30282f3438889e233f3d9ed22ab7c7e795b2951..ea46f37c710883a21d8332ae2f5c030de0fa27af 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -34,37 +34,35 @@ enum class ConvAttr { StrideDims, DilationDims, KernelDims, NoBias };
 
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
-                public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
-                public StaticAttributes<ConvAttr,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        bool> {
+                public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)> {
 
 public:
     static const std::string Type;
 
-    Conv_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ConvAttr,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        bool>;
+                                            std::array<DimSize_t, DIM>,
+                                            std::array<DimSize_t, DIM>,
+                                            std::array<DimSize_t, DIM>,
+                                            bool>;
     template <ConvAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Conv_Op() = delete;
 
     constexpr Conv_Op(const std::array<DimSize_t, DIM> &kernelDims,
                       const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
                       const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1),
                       bool noBias = false)
         : OperatorTensor(Type, 1, 2, 1),
-          Attributes_(attr<ConvAttr::StrideDims>(strideDims),
-                      attr<ConvAttr::DilationDims>(dilationDims),
-                      // attr<ConvAttr::InChannels>(inChannels),
-                      // attr<ConvAttr::OutChannels>(outChannels),
-                      attr<ConvAttr::KernelDims>(kernelDims),
-                      attr<ConvAttr::NoBias>(noBias)) {}
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ConvAttr::StrideDims>(strideDims),
+            attr<ConvAttr::DilationDims>(dilationDims),
+            attr<ConvAttr::KernelDims>(kernelDims),
+            attr<ConvAttr::NoBias>(noBias)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -117,6 +115,13 @@ public:
         return getInput(1)->template dims<DIM+2>()[0];
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvAttr::KernelDims>(); }
+    inline bool& noBias() const { return mAttributes->template getAttr<ConvAttr::NoBias>(); }
+
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 7091421720aaf4291198823a6d7dcd732a8d9f99..b2234f4cb80356b9ed64193f9f42ed8b8849e12b 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -33,34 +33,35 @@ enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims, NoBias };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
-                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
-                public StaticAttributes<ConvDepthWiseAttr,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       bool> {
+                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    ConvDepthWise_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        std::array<DimSize_t, DIM>,
-                                        bool>;
+                                            std::array<DimSize_t, DIM>,
+                                            std::array<DimSize_t, DIM>,
+                                            std::array<DimSize_t, DIM>,
+                                            bool>;
     template <ConvDepthWiseAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    ConvDepthWise_Op() = delete;
 
     constexpr ConvDepthWise_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                                const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1),
                                bool no_bias=false)
         : OperatorTensor(Type, 1, 2, 1),
-          Attributes_(attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
-                      attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-                      attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
-                      attr<ConvDepthWiseAttr::NoBias>(no_bias)) {}
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
+            attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
+            attr<ConvDepthWiseAttr::KernelDims>(kernel_dims),
+            attr<ConvDepthWiseAttr::NoBias>(no_bias)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -93,6 +94,12 @@ public:
         return getInput(1)->template dims<DIM+2>()[0];
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>(); }
+    inline bool& noBias() const { return mAttributes->template getAttr<ConvDepthWiseAttr::NoBias>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
     }
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 9f10970c4fd5b21a1cb92b334167d353f066e05b..37cebdd0c8542ac0de3f51ebd0a992959fe8b64f 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -29,19 +29,22 @@ enum class FCAttr { NoBias };
 
 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
-                                 std::shared_ptr<OperatorImpl>(const FC_Op &)>,
-              public StaticAttributes<FCAttr, bool> {
+                                 std::shared_ptr<OperatorImpl>(const FC_Op &)> {
 public:
     static const std::string Type;
 
-    FC_Op() = delete;
+private:
+    template <FCAttr e>
+    using attr = typename StaticAttributes<FCAttr, bool>::template attr<e>;
+    const std::shared_ptr<StaticAttributes<FCAttr, bool>> mAttributes;
 
-    using Attributes_ = StaticAttributes<FCAttr, bool>;
-    template <FCAttr e> using attr = typename Attributes_::template attr<e>;
+public:
+    FC_Op() = delete;
 
     FC_Op(bool noBias)
     : OperatorTensor(Type, 1, 2, 1),
-      Attributes_(attr<FCAttr::NoBias>(noBias))
+      mAttributes(std::make_shared<StaticAttributes<FCAttr, bool>>(
+        attr<FCAttr::NoBias>(noBias)))
    {}
 
    /**
@@ -50,11 +53,11 @@ public:
      */
     FC_Op(const FC_Op& op)
     : OperatorTensor(op),
-      Attributes_(op)
+      mAttributes(op.mAttributes)
     {
-        if (op.mImpl){
+        if (op.mImpl) {
             SET_IMPL_MACRO(FC_Op, *this, op.backend());
-        }else{
+        } else {
             mImpl = nullptr;
         }
     }
@@ -79,6 +82,8 @@ public:
         }
         return getInput(1)->template dims<2>()[0];
     }
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline bool& noBias() const { return mAttributes->getAttr<FCAttr::NoBias>(); }
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input", "weight", "bias"};
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index a04e4be69c9fd1a6ed7753ed512c7f5e45b925d9..a7ec3cf221c7b679d1b4d152b9410a5cdf374c52 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -12,7 +12,7 @@
 #ifndef AIDGE_CORE_OPERATOR_GATHER_H_
 #define AIDGE_CORE_OPERATOR_GATHER_H_
 
-#include <cstdint>  // std::int64_t
+#include <cstdint>  // std::int8_t, std::int64_t
 #include <memory>
 #include <string>
 #include <vector>
@@ -36,21 +36,31 @@ enum class GatherAttr { Axis, Indices, GatheredShape };
 class Gather_Op : public OperatorTensor,
                   public Registrable<Gather_Op,
                                      std::string,
-                                     std::shared_ptr<OperatorImpl>(const Gather_Op&)>,
-                  public StaticAttributes<GatherAttr, std::int8_t, std::vector<int64_t>, std::vector<DimSize_t>> {
-
+                                     std::shared_ptr<OperatorImpl>(const Gather_Op&)> {
 public:
     static const std::string Type;
 
+    using Attributes_ = StaticAttributes<GatherAttr,
+                                            std::int8_t,
+                                            std::vector<int64_t>,
+                                            std::vector<DimSize_t>>;
+private:
+    template <GatherAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
     Gather_Op() = delete;
 
-    using Attributes_ = StaticAttributes<GatherAttr, std::int8_t, std::vector<int64_t>, std::vector<DimSize_t>>;
-    template <GatherAttr e> using attr = typename Attributes_::template attr<e>;
-    Gather_Op(std::int8_t axis, const std::vector<int64_t>& indices, const std::vector<DimSize_t>& gatheredShape)
-            : OperatorTensor(Type, 2, 0, 1),
-            Attributes_(attr<GatherAttr::Axis>(axis),
-                        attr<GatherAttr::Indices>(indices),
-                        attr<GatherAttr::GatheredShape>(gatheredShape))
+    Gather_Op(std::int8_t axis,
+              const std::vector<int64_t>& indices,
+              const std::vector<DimSize_t>& gatheredShape)
+        : OperatorTensor(Type, 2, 0, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<GatherAttr::Axis>(axis),
+            attr<GatherAttr::Indices>(indices),
+            attr<GatherAttr::GatheredShape>(gatheredShape)))
     {
         mImpl = std::make_shared<Gather_OpImpl>(*this);
     }
@@ -61,7 +71,7 @@ public:
      */
     Gather_Op(const Gather_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Gather_Op, *this, op.backend());
@@ -84,6 +94,11 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int8_t& axis() const { return mAttributes->getAttr<GatherAttr::Axis>(); }
+    inline std::vector<int64_t>& indices() const { return mAttributes->getAttr<GatherAttr::Indices>(); }
+    inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes->getAttr<GatherAttr::GatheredShape>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "indices"};
     }
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 4ac9b4c1c40803309815f0ef1fb05c9e5a28e957..25fb0523aac4dad2dd95bb48dd931d674ca8329d 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -26,16 +26,18 @@ namespace Aidge {
 class GenericOperator_Op
     : public OperatorTensor,
-      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>,
-      public DynamicAttributes {
+      public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)> {
 private:
     using ComputeDimsFunc = std::function<std::vector<std::vector<size_t>>(const std::vector<std::vector<size_t>>&)>;
 
     ComputeDimsFunc mForwardDims;
 
+    const std::shared_ptr<DynamicAttributes> mAttributes;
+
 public:
     GenericOperator_Op(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut)
-        : OperatorTensor(type, nbData, nbParam, nbOut)
+        : OperatorTensor(type, nbData, nbParam, nbOut),
+          mAttributes(std::make_shared<DynamicAttributes>())
     {
         mImpl = std::make_shared<OperatorImpl>(*this);
     }
@@ -45,7 +47,8 @@ public:
      * @param op Operator to copy.
      */
     GenericOperator_Op(const GenericOperator_Op& op)
-        : OperatorTensor(op)
+        : OperatorTensor(op),
+          mAttributes(op.attributes() ? op.mAttributes : std::make_shared<DynamicAttributes>())
     {
         mImpl = std::make_shared<OperatorImpl>(*this, op.backend());
     }
@@ -64,6 +67,22 @@ public:
     bool forwardDims(bool allowDataDependency = false) override final;
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+
+    template <class T>
+    inline T& getAttr(const std::string& name)
+    { return mAttributes->template getAttr<T>(name); }
+    template <class T>
+    inline const T& getAttr(const std::string& name) const
+    { return mAttributes->template getAttr<T>(name); }
+
+    ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
+    ///\tparam T expected Attribute type
+    ///\param name Attribute name
+    ///\param value Attribute value
+    template <class T>
+    inline void addAttr(const std::string& name, const T& value) const
+    { mAttributes->template addAttr<T>(name, value); }
 
     // Helper functions that can be used with setForwardDims():
     static const ComputeDimsFunc Identity;
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 83a7c30fce7e0f68576f367d4b0bfe48edf4b3b6..2556a395b28be980dcd0d9e91669fb2fe78e6549 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -12,16 +12,16 @@
 #ifndef AIDGE_CORE_OPERATOR_LEAKYRELU_H_
 #define AIDGE_CORE_OPERATOR_LEAKYRELU_H_
 
-#include <vector>
 #include <memory>
+#include <vector>
 
-#include "aidge/utils/StaticAttributes.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -30,20 +30,24 @@ enum class LeakyReLUAttr {
 };
 
 class LeakyReLU_Op : public OperatorTensor,
-    public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
-    public StaticAttributes<LeakyReLUAttr, float> {
+    public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)> {
 public:
     static const std::string Type;
 
-    LeakyReLU_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<LeakyReLUAttr, float>;
     template <LeakyReLUAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    LeakyReLU_Op() = delete;
 
     LeakyReLU_Op(float negativeSlope)
         : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(
-            attr<LeakyReLUAttr::NegativeSlope>(negativeSlope))
+          mAttributes(
+            std::make_shared<Attributes_>(
+              attr<LeakyReLUAttr::NegativeSlope>(negativeSlope)))
     {}
 
     /**
@@ -52,7 +56,7 @@ public:
      */
     LeakyReLU_Op(const LeakyReLU_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
@@ -76,6 +80,9 @@ public:
         mOutputs[0]->setBackend(name, device);
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline float& negativeSlope() const noexcept { return mAttributes->getAttr<LeakyReLUAttr::NegativeSlope>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 8aff1582604a9e23e248e7c01521567483c793ad..6f6dd8b38fbe92170481f2bfea867352e51e1161 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -34,30 +34,31 @@ enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public OperatorTensor,
-                public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
-                public StaticAttributes<MaxPoolingAttr,
-                                       std::array<DimSize_t, DIM>,
-                                       std::array<DimSize_t, DIM>,
-                                       bool> {
+                public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    MaxPooling_Op() = delete;
-
     using Attributes_ = StaticAttributes<MaxPoolingAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              bool>;
+
+private:
     template <MaxPoolingAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    MaxPooling_Op() = delete;
 
     constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                             bool ceil_mode = false)
         : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<MaxPoolingAttr::StrideDims>(stride_dims),
-                      attr<MaxPoolingAttr::KernelDims>(kernel_dims),
-                      attr<MaxPoolingAttr::CeilMode>(ceil_mode))
+          mAttributes(std::make_shared<Attributes_>(
+            attr<MaxPoolingAttr::StrideDims>(stride_dims),
+            attr<MaxPoolingAttr::KernelDims>(kernel_dims),
+            attr<MaxPoolingAttr::CeilMode>(ceil_mode)))
     {}
 
     /**
@@ -66,7 +67,7 @@ public:
      */
     MaxPooling_Op(const MaxPooling_Op<DIM>& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl) {
             SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
@@ -93,17 +94,17 @@ public:
             const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
             std::function<float(float)> roundingFunction;
-            if (this->template getAttr<MaxPoolingAttr::CeilMode>()) {
+            if (mAttributes->template getAttr<MaxPoolingAttr::CeilMode>()) {
                 roundingFunction = [](float x) { return std::ceil(x); };
             } else {
                 roundingFunction = [](float x) { return std::floor(x); };
             }
 
-            for (std::size_t dim = 0; dim < this->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
+            for (std::size_t dim = 0; dim < mAttributes->template getAttr<MaxPoolingAttr::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                             roundingFunction(static_cast<float>(inputDims[dim+2] -
-                                                                    this->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
-                                            static_cast<float>(this->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
+                                                                    mAttributes->template getAttr<MaxPoolingAttr::KernelDims>()[dim]) /
+                                            static_cast<float>(mAttributes->template getAttr<MaxPoolingAttr::StrideDims>()[dim])));
             }
             outputDims[1] = inputDims[1];
             outputDims[0] = inputDims[0];
@@ -119,6 +120,11 @@ public:
         mOutputs[0]->setBackend(name, device);
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<MaxPoolingAttr::KernelDims>(); }
+    inline bool& ceilMode() const { return mAttributes->template getAttr<MaxPoolingAttr::CeilMode>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index 6b0ace2eb09fde069f8b9b104f92fc33811c25aa..714f629d71d4a032360eccc00562bd9af32aa95d 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -37,20 +37,25 @@ public:
 enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep };
 
 class Memorize_Op : public OperatorTensor,
-    public Registrable<Memorize_Op, std::string, std::unique_ptr<OperatorImpl>(const Memorize_Op&)>,
-    public StaticAttributes<MemorizeAttr, unsigned int, unsigned int, unsigned int> {
+    public Registrable<Memorize_Op, std::string, std::unique_ptr<OperatorImpl>(const Memorize_Op&)> {
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<MemorizeAttr, unsigned int, unsigned int, unsigned int>;
+private:
+    using Attributes_ = StaticAttributes<MemorizeAttr, std::uint32_t, std::uint32_t, std::uint32_t>;
     template <MemorizeAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    Memorize_Op(const unsigned int endStep)
+public:
+    Memorize_Op() = delete;
+
+    Memorize_Op(const std::uint32_t endStep)
         : OperatorTensor(Type, 1, 1, 2),
-          Attributes_(attr<MemorizeAttr::ScheduleStep>(0),
-                      attr<MemorizeAttr::ForwardStep>(0),
-                      attr<MemorizeAttr::EndStep>(endStep))
+          mAttributes(std::make_shared<Attributes_>(
+            attr<MemorizeAttr::ScheduleStep>(0),
+            attr<MemorizeAttr::ForwardStep>(0),
+            attr<MemorizeAttr::EndStep>(endStep)))
     {
         mOutputs[1] = mOutputs[0];
     }
@@ -62,7 +67,7 @@ public:
      */
     Memorize_Op(const Memorize_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl) {
             SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
@@ -87,6 +92,11 @@ public:
     void updateConsummerProducer() override;
     void forward() override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::uint32_t& scheduleStep() const { return mAttributes->template getAttr<MemorizeAttr::ScheduleStep>(); }
+    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<MemorizeAttr::ForwardStep>(); }
+    inline std::uint32_t& endStep() const { return mAttributes->template getAttr<MemorizeAttr::EndStep>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "data_input_init"};
     }
@@ -95,7 +105,7 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Memorize(const unsigned int endStep, const std::string& name = "") {
+inline std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Memorize_Op>(endStep), name);
 }
 } // namespace Aidge
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 124512517b8c6a274ff426034c15424c82bb0030..31aa0f0eb4862ac5d23ff5f6fedd42c94a4b7051 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -18,11 +18,21 @@
 #include <utility>
 #include <cstddef>
 
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <fmt/format.h>
+#endif
+
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Data.hpp"
+#include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/hook/Hook.hpp"
 
+#ifdef PYBIND
+namespace py = pybind11;
+#endif
 namespace Aidge {
 
 enum class OperatorType {
@@ -73,6 +83,7 @@ public:
 public:
     virtual std::shared_ptr<Operator> clone() const = 0;
 
+    virtual std::shared_ptr<Attributes> attributes() const { return nullptr; };
     /**
      * @brief Set the specified input with a shallow copy.
     * @param inputIdx Index of the input to set.
@@ -192,6 +203,17 @@ public:
     static const std::vector<std::string> getOutputsName() {
         return {};
     }
+
+#ifdef PYBIND
+    std::string repr() const {
+        return fmt::format("Operator(type = '{}', nb_in = {}, nb_out = {}, attr = {}, backend = {})",
+                           type(),
+                           nbInputs(),
+                           nbOutputs(),
+                           (attributes() ? attributes()->repr() : "None"),
+                           (mImpl ? "'"+backend()+"'" : "None"));
+    }
+#endif
 };
 
 } // namespace Aidge
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index a4e4ebdce801971de118ca8a263999046a13777d..79a3489a289df6857d85ff1e22dd5669a42e85ff 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -13,16 +13,16 @@
 #define AIDGE_CORE_OPERATOR_PAD_H_
 
 #include <array>
-#include <numeric>
+#include <memory>
+#include <string>
 #include <vector>
-#include <cmath>
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -31,30 +31,31 @@ enum class PadBorderType { Constant, Edge, Reflect, Wrap };
 
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
-                public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
-                public StaticAttributes<PadAttr,
-                                       std::array<DimSize_t, 2*DIM>,
-                                       PadBorderType,
-                                       double> {
+                public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)> {
 public:
     static const std::string Type;
 
-    Pad_Op() = delete;
-
+private:
     using Attributes_ = StaticAttributes<PadAttr,
-                                        std::array<DimSize_t, 2*DIM>,
-                                        PadBorderType,
-                                        double>;
+                                            std::array<DimSize_t, 2*DIM>,
+                                            PadBorderType,
+                                            double>;
     template <PadAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    Pad_Op() = delete;
 
     constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                      const PadBorderType &borderType = PadBorderType::Constant,
                      double borderValue = 0.0)
         : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<PadAttr::BeginEndBorders>(beginEndTuples),
-                      attr<PadAttr::BorderType>(borderType),
-                      attr<PadAttr::BorderValue>(borderValue)) {}
+          mAttributes(std::make_shared<Attributes_>(
+            attr<PadAttr::BeginEndBorders>(beginEndTuples),
+            attr<PadAttr::BorderType>(borderType),
+            attr<PadAttr::BorderValue>(borderValue))) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -62,7 +63,7 @@ public:
      */
     Pad_Op(const Pad_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {}
 
     /**
@@ -87,9 +88,9 @@ public:
             const std::array<DimSize_t, DIM + 2> inputDims = getInput(0)->template dims<DIM+2>();
 
             for (std::size_t dim = 0; dim < DIM; ++dim) {
-                outputDims[dim+2] = this->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
+                outputDims[dim+2] = mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim]
                                     + inputDims[dim+2]
-                                    + this->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
+                                    + mAttributes->template getAttr<PadAttr::BeginEndBorders>()[2*dim+1];
             }
             outputDims[1] = inputDims[1];
             outputDims[0] = inputDims[0];
@@ -104,6 +105,11 @@ public:
         mOutputs[0]->setBackend(name, device);
     }
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, 2*DIM>& beginEndBorders() const noexcept { return mAttributes->template getAttr<PadAttr::BeginEndBorders>(); }
+    inline PadBorderType& borderType() const noexcept { return mAttributes->template getAttr<PadAttr::BorderType>(); }
+    inline double& borderValue() const noexcept { return mAttributes->template getAttr<PadAttr::BorderValue>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 2219f30ec9db7acf55491882a78e7a1ed2931cf0..93493cd8e095116db7e8c1dcb94f3bc76b1d25c9 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -34,18 +34,19 @@ public:
 enum class PopAttr { ForwardStep };
 
 class Pop_Op : public OperatorTensor,
-    public Registrable<Pop_Op, std::string, std::unique_ptr<OperatorImpl>(const Pop_Op&)>,
-    public StaticAttributes<PopAttr, unsigned int> {
+    public Registrable<Pop_Op, std::string, std::unique_ptr<OperatorImpl>(const Pop_Op&)> {
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<PopAttr, unsigned int>;
-    template <PopAttr e>
-    using attr = typename Attributes_::template attr<e>;
+private:
+    using Attributes_ = StaticAttributes<PopAttr, std::uint32_t>;
+    template <PopAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
+public:
     Pop_Op()
         : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<PopAttr::ForwardStep>(0))
+          mAttributes(std::make_shared<Attributes_>(attr<PopAttr::ForwardStep>(0)))
     {
         mImpl = std::make_shared<Pop_OpImpl>(*this);
     }
@@ -56,7 +57,7 @@ public:
      */
     Pop_Op(const Pop_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Pop_Op, *this, op.backend());
@@ -80,6 +81,9 @@ public:
     void updateConsummerProducer() override;
     void forward() override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<PopAttr::ForwardStep>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index f547a45ab55e4f9a38c2f63c6e8af44430813668..1b9505c883cae62d3ba44bd6ff07932014607d60 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -31,20 +31,24 @@ enum class ProdAttr { Constant };
 
 class Producer_Op
     : public OperatorTensor,
      public Registrable<Producer_Op, std::string, std::shared_ptr<OperatorImpl>(
-                                          const Producer_Op &)>,
-      public StaticAttributes<ProdAttr, bool> {
+                                          const Producer_Op &)> {
 public:
     static const std::string Type;
 
+private:
     using Attributes_ = StaticAttributes<ProdAttr, bool>;
-    template <ProdAttr e>
-    using attr = typename Attributes_::template attr<e>;
+    template <ProdAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Producer_Op() = delete;
 
     template <std::size_t DIM>
     Producer_Op(const std::array<DimSize_t, DIM>& dims,
                 bool constant = false)
         : OperatorTensor(Type, 0, 0, 1),
-          Attributes_(attr<ProdAttr::Constant>(constant))
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ProdAttr::Constant>(constant)))
     {
         mOutputs[0]->resize(dims);
         mImpl = std::make_shared<OperatorImpl>(*this);
@@ -95,6 +99,9 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline bool& constant() const { return mAttributes->template getAttr<ProdAttr::Constant>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {};
     }
@@ -109,7 +116,7 @@ public:
     }
 
     void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) override {
-        if (getAttr<ProdAttr::Constant>()) {
+        if (mAttributes->template getAttr<ProdAttr::Constant>()) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
         }
         OperatorTensor::setOutput(outputIdx, data);
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index ff8d8b0696aafdab48cd37d049fa0473078d7ea6..fdbc3a5c132e0c9599481370bc39f351df8883bc 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -29,22 +29,28 @@ namespace Aidge {
 enum class ReduceMeanAttr { Axes, KeepDims };
 
 class ReduceMean_Op : public OperatorTensor,
-                public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>,
-                public StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t> {
+                public Registrable<ReduceMean_Op, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)> {
 
-   public:
+public:
     static const std::string Type;
 
-    ReduceMean_Op() = delete;
-
-    using Attributes_ = StaticAttributes<ReduceMeanAttr, std::vector<std::int32_t>, DimSize_t>;
+private:
+    using Attributes_ = StaticAttributes<ReduceMeanAttr,
+                                            std::vector<std::int32_t>,
+                                            DimSize_t>;
     template <ReduceMeanAttr e>
     using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    ReduceMean_Op() = delete;
 
     ReduceMean_Op(const std::vector<std::int32_t>& axes, DimSize_t keep_dims)
         : OperatorTensor(Type, 1, 0, 1),
-          Attributes_(attr<ReduceMeanAttr::Axes>(axes),
-                      attr<ReduceMeanAttr::KeepDims>(keep_dims)) {}
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ReduceMeanAttr::Axes>(axes),
+            attr<ReduceMeanAttr::KeepDims>(keep_dims)))
+    {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -52,7 +58,7 @@ class ReduceMean_Op : public OperatorTensor,
      */
     ReduceMean_Op(const ReduceMean_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (op.mImpl){
             SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
@@ -73,6 +79,11 @@ class ReduceMean_Op : public OperatorTensor,
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes->getAttr<ReduceMeanAttr::Axes>(); }
+    inline DimSize_t& keepDims() const noexcept { return mAttributes->getAttr<ReduceMeanAttr::KeepDims>(); }
+
+
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 12fbda88b0044f836b298e0cf818724f53f821a7..23494a4d1f45106c6946cb4e5fcc7384b6afd720 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -32,22 +32,26 @@ public:
 enum class ReshapeAttr { Shape, AllowZero };
 
 class Reshape_Op : public OperatorTensor,
-                   public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)>,
-                   public StaticAttributes<ReshapeAttr, std::vector<std::int64_t>, bool> {
+                   public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)> {
 public:
     static const std::string Type;
 
-    Reshape_Op() = delete;
+private:
+    using Attributes_ = StaticAttributes<ReshapeAttr,
+                                            std::vector<std::int64_t>,
+                                            bool>;
+    template <ReshapeAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
 
-    using Attributes_ = StaticAttributes<ReshapeAttr, std::vector<std::int64_t>, bool>;
-    template <ReshapeAttr e>
-    using attr = typename Attributes_::template attr<e>;
+public:
+    Reshape_Op() = delete;
 
     Reshape_Op(const std::vector<std::int64_t>& shape, bool allowzero)
         : OperatorTensor(Type, 2, 0, 1),
-          Attributes_(attr<ReshapeAttr::Shape>(shape),
-                      attr<ReshapeAttr::AllowZero>(allowzero))
+          mAttributes(std::make_shared<Attributes_>(
+            attr<ReshapeAttr::Shape>(shape),
+            attr<ReshapeAttr::AllowZero>(allowzero)))
     {
         mImpl = std::make_shared<Reshape_OpImpl>(*this);
     }
@@ -58,7 +62,7 @@ public:
      */
     Reshape_Op(const Reshape_Op& op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
@@ -81,6 +85,10 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
+    std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::vector<std::int64_t>& shape() const { return mAttributes->template getAttr<ReshapeAttr::Shape>(); }
+    inline bool& allowZero() const { return mAttributes->template getAttr<ReshapeAttr::AllowZero>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
     }
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index c864bd045d8a5a1fc5f4ee591d1d81fcaf241bac..725ec171b93ed301a377449b6253dff99a49d114 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -12,39 +12,42 @@
 #ifndef AIDGE_CORE_OPERATOR_SCALING_H_
 #define AIDGE_CORE_OPERATOR_SCALING_H_
 
+#include <cstddef>  // std::size_t
 #include <vector>
 #include <memory>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Registrar.hpp" +#include "aidge/utils/StaticAttributes.hpp" #include "aidge/utils/Types.h" namespace Aidge { enum class ScalingAttr { - scalingFactor, quantizedNbBits, isOutputUnsigned + ScalingFactor, QuantizedNbBits, IsOutputUnsigned }; -class Scaling_Op +class Scaling_Op : public OperatorTensor, - public Registrable<Scaling_Op, std::string, std::shared_ptr<OperatorImpl>(const Scaling_Op&)>, - public StaticAttributes<ScalingAttr, float, size_t, bool> { + public Registrable<Scaling_Op, std::string, std::shared_ptr<OperatorImpl>(const Scaling_Op&)> { public: static const std::string Type; - Scaling_Op() = delete; - +private: using Attributes_ = StaticAttributes<ScalingAttr, float, std::size_t, bool>; template <ScalingAttr e> using attr = typename Attributes_::template attr<e>; + const std::shared_ptr<Attributes_> mAttributes; + +public: + Scaling_Op() = delete; Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned) : OperatorTensor(Type, 1, 0, 1), - Attributes_( - attr<ScalingAttr::scalingFactor>(scalingFactor), - attr<ScalingAttr::quantizedNbBits>(nbBits), - attr<ScalingAttr::isOutputUnsigned>(isOutputUnsigned)) + mAttributes(std::make_shared<Attributes_>( + attr<ScalingAttr::ScalingFactor>(scalingFactor), + attr<ScalingAttr::QuantizedNbBits>(nbBits), + attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned))) {} /** @@ -53,7 +56,7 @@ public: */ Scaling_Op(const Scaling_Op& op) : OperatorTensor(op), - Attributes_(op) + mAttributes(op.mAttributes) { if (op.mImpl){ SET_IMPL_MACRO(Scaling_Op, *this, op.backend()); @@ -72,6 +75,11 @@ public: void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; + inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } + inline float& scalingFactor() const noexcept { return mAttributes -> getAttr<ScalingAttr::ScalingFactor>(); } + inline std::size_t& quantizedNbBits() const noexcept { return mAttributes -> getAttr<ScalingAttr::QuantizedNbBits>(); } + inline bool& isOutputUnsigned() const noexcept { return mAttributes -> getAttr<ScalingAttr::IsOutputUnsigned>(); } + static const std::vector<std::string> getInputsName() { return {"data_input"}; } @@ -85,10 +93,10 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::stri return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor), name); } */ -inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, - std::size_t quantizedNbBits=8, - bool isOutputUnsigned=true, - const std::string& name = "") +inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, + std::size_t quantizedNbBits=8, + bool isOutputUnsigned=true, + const std::string& name = "") { return std::make_shared<Node>(std::make_shared<Scaling_Op>(scalingFactor,quantizedNbBits, isOutputUnsigned), name); } @@ -97,7 +105,7 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, namespace { template <> const char* const EnumStrings<Aidge::ScalingAttr>::data[] - = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"}; + = {"ScalingFactor", "QuantizedNbBits", "IsOutputUnsigned"}; } #endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */ diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp index c8f16bb1ad769299a89d3f8a05e46960fe824711..71887ec6cba010f0208c449f6054d3d6e9be72c4 100644 --- a/include/aidge/operator/Slice.hpp +++ b/include/aidge/operator/Slice.hpp @@ -29,22 +29,29 @@ enum class SliceAttr { Starts, Ends, Axes, Steps }; class 
Slice_Op : public OperatorTensor, - public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)>, - public StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int8_t>, std::vector<std::int64_t>> { - + public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)> { public: static const std::string Type; +private: + using Attributes_ = StaticAttributes<SliceAttr, + std::vector<std::int64_t>, + std::vector<std::int64_t>, + std::vector<std::int8_t>, + std::vector<std::int64_t>>; + template <SliceAttr e> using attr = typename Attributes_::template attr<e>; + const std::shared_ptr<Attributes_> mAttributes; + +public: Slice_Op() = delete; - using Attributes_ = StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int8_t>, std::vector<std::int64_t>>; - template <SliceAttr e> using attr = typename Attributes_::template attr<e>; Slice_Op(const std::vector<std::int64_t>& starts, const std::vector<std::int64_t>& ends, const std::vector<std::int8_t>& axes, const std::vector<std::int64_t>& steps) : OperatorTensor(Type, 5, 0, 1), - Attributes_(attr<SliceAttr::Starts>(starts), - attr<SliceAttr::Ends>(ends), - attr<SliceAttr::Axes>(axes), - attr<SliceAttr::Steps>(steps)) + mAttributes(std::make_shared<Attributes_>( + attr<SliceAttr::Starts>(starts), + attr<SliceAttr::Ends>(ends), + attr<SliceAttr::Axes>(axes), + attr<SliceAttr::Steps>(steps))) {} @@ -55,7 +62,7 @@ public: */ Slice_Op(const Slice_Op &op) : OperatorTensor(op), - Attributes_(op) + mAttributes(op.mAttributes) { if (!op.backend().empty()) { SET_IMPL_MACRO(Slice_Op, *this, op.backend()); @@ -73,10 +80,16 @@ public: std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); } bool dimsForwarded() const override final; - bool forwardDims(bool allowDataDependency = false) override final; + bool forwardDims(bool allowDataDependency = true) override final; void setBackend(const std::string &name, DeviceIdx_t device = 0) override; + inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } + inline std::vector<std::int64_t>& starts() const noexcept { return mAttributes -> getAttr<SliceAttr::Starts>(); } + inline std::vector<std::int64_t>& ends() const noexcept { return mAttributes -> getAttr<SliceAttr::Ends>(); } + inline std::vector<std::int8_t>& axes() const noexcept { return mAttributes -> getAttr<SliceAttr::Axes>(); } + inline std::vector<std::int64_t>& steps() const noexcept { return mAttributes -> getAttr<SliceAttr::Steps>(); } + static const std::vector<std::string> getInputsName(){ return {"data_input", "starts", "ends", "axes", "steps"}; } diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp index 1868dc6e3df48401ef3f8a126b07572e2f45144d..4bb4c1d0caf74e7c86d2685eaf885d2e9d642ba0 100644 --- a/include/aidge/operator/Softmax.hpp +++ b/include/aidge/operator/Softmax.hpp @@ -24,24 +24,29 @@ #include "aidge/utils/Types.h" namespace Aidge { -enum class SoftmaxAttr { AxisIdx }; +enum class SoftmaxAttr { Axis }; class Softmax_Op : public OperatorTensor, public Registrable<Softmax_Op, std::string, - std::shared_ptr<OperatorImpl>(const Softmax_Op&)>, - public StaticAttributes<SoftmaxAttr, std::size_t> { + std::shared_ptr<OperatorImpl>(const Softmax_Op&)> { public: static const std::string Type; +private: + using Attributes_ = StaticAttributes<SoftmaxAttr, std::int32_t>; + template <SoftmaxAttr e> using attr = 
typename Attributes_::template attr<e>; + const std::shared_ptr<Attributes_> mAttributes; + +public: Softmax_Op() = delete; - using Attributes_ = StaticAttributes<SoftmaxAttr, std::size_t>; - template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>; - Softmax_Op(std::size_t axis) - : OperatorTensor(Type, 1, 0, 1), - Attributes_(attr<SoftmaxAttr::AxisIdx>(axis)) {} + Softmax_Op(std::int32_t axis) + : OperatorTensor(Type, 1, 0, 1), + mAttributes(std::make_shared<Attributes_>( + attr<SoftmaxAttr::Axis>(axis))) + {} /** * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated). @@ -49,7 +54,7 @@ public: */ Softmax_Op(const Softmax_Op& op) : OperatorTensor(op), - Attributes_(op) + mAttributes(op.mAttributes) { if (op.mImpl){ SET_IMPL_MACRO(Softmax_Op, *this, op.backend()); @@ -68,6 +73,10 @@ public: void setBackend(const std::string& name, DeviceIdx_t device = 0) override final; + inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } + + inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<SoftmaxAttr::Axis>(); } + static const std::vector<std::string> getInputsName(){ return {"data_input"}; } @@ -76,7 +85,7 @@ public: } }; -inline std::shared_ptr<Node> Softmax(std::size_t axis, const std::string& name = "") { +inline std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "") { return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name); } } // namespace Aidge diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp index f1a7fe477fd77baf86d48c7c5bd67c6ea074a1bc..2f5c43693a562b39de7a159a0050745085d6204b 100644 --- a/include/aidge/operator/Transpose.hpp +++ b/include/aidge/operator/Transpose.hpp @@ -27,28 +27,33 @@ namespace Aidge { class TransposeImpl : public OperatorImpl { public: - TransposeImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {} + TransposeImpl(const Operator& op, const std::string& backend = "") + : OperatorImpl(op, backend) + {} void forward() override; }; enum class TransposeAttr { OutputDimsOrder }; class Transpose_Op : public OperatorTensor, - public Registrable<Transpose_Op, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op&)>, - public StaticAttributes<TransposeAttr, std::vector<DimSize_t>> { + public Registrable<Transpose_Op, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op&)> { - public: +public: static const std::string Type; - Transpose_Op() = delete; +private: using Attributes_ = StaticAttributes<TransposeAttr, std::vector<DimSize_t>>; - template <TransposeAttr e> - using attr = typename Attributes_::template attr<e>; + template <TransposeAttr e> using attr = typename Attributes_::template attr<e>; + const std::shared_ptr<Attributes_> mAttributes; + +public: + Transpose_Op() = delete; Transpose_Op(const std::vector<DimSize_t> &outputDimsOrder) : OperatorTensor(Type, 1, 0, 1), - Attributes_(attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder)) + mAttributes(std::make_shared<Attributes_>( + attr<TransposeAttr::OutputDimsOrder>(outputDimsOrder))) { mImpl = std::make_shared<TransposeImpl>(*this); } @@ -59,7 +64,7 @@ class Transpose_Op : public OperatorTensor, */ Transpose_Op(const Transpose_Op& op) : OperatorTensor(op), - Attributes_(op) + mAttributes(op.mAttributes) { if (!op.backend().empty()) { SET_IMPL_MACRO(Transpose_Op, *this, op.backend()); @@ -81,6 +86,9 @@ class 
Transpose_Op : public OperatorTensor, void setBackend(const std::string &name, DeviceIdx_t device = 0) override; + inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } + inline std::vector<DimSize_t>& outputDimsOrder() const noexcept { return mAttributes -> getAttr<TransposeAttr::OutputDimsOrder>(); } + static const std::vector<std::string> getInputsName(){ return {"data_input"}; } diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp index 07123bc88aa1da22bfa98166d6a01af8d66be98d..8d4148bb49ff6092b8ba695e727bdc0750e58cb2 100644 --- a/src/operator/AvgPooling.cpp +++ b/src/operator/AvgPooling.cpp @@ -27,7 +27,10 @@ template <Aidge::DimIdx_t DIM> const std::string Aidge::AvgPooling_Op<DIM>::Type = "AvgPooling"; template <Aidge::DimIdx_t DIM> -Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op): OperatorTensor(op), Attributes_(op) { +Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op) + : OperatorTensor(op), + mAttributes(op.mAttributes) +{ if (op.mImpl) { SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend()); } else { @@ -47,11 +50,11 @@ bool Aidge::AvgPooling_Op<DIM>::forwardDims(bool /*allowDataDependency*/) { outputDims[0] = inputDims[0]; outputDims[1] = inputDims[1]; - for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) { + for (std::size_t dim = 0; dim < mAttributes->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) { outputDims[dim+2] = 1 + static_cast<DimSize_t>( std::floor(static_cast<float>(inputDims[dim+2] - - this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) / - static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim]))); + mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) / + static_cast<float>(mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[dim]))); } getOutput(0)->resize(outputDims); return true; @@ -89,10 +92,10 @@ Aidge::AvgPooling_Op<DIM>::computeReceptiveField(const std::vector<Aidge::DimSiz for (DimIdx_t i = 0; i < DIM; ++i) { inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1) - * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)] + * mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)] + 1 - + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)); - inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]; + + (mAttributes->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)); + inputIdxDims[2+i] *= mAttributes->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]; } std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res; res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims)); diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp index 2563ef843674725dd05e77d893de3778ae4623d2..e5fc634ebd6db047554abed0da1af4c0c3bf1ec2 100644 --- a/src/operator/BatchNorm.cpp +++ b/src/operator/BatchNorm.cpp @@ -27,7 +27,10 @@ template <Aidge::DimIdx_t DIM> const std::string Aidge::BatchNorm_Op<DIM>::Type = "BatchNorm"; template <Aidge::DimIdx_t DIM> -Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op): OperatorTensor(op), Attributes_(op) { +Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op) + : OperatorTensor(op), + mAttributes(op.mAttributes) +{ if 
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index 2563ef843674725dd05e77d893de3778ae4623d2..e5fc634ebd6db047554abed0da1af4c0c3bf1ec2 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -27,7 +27,10 @@ template <Aidge::DimIdx_t DIM>
 const std::string Aidge::BatchNorm_Op<DIM>::Type = "BatchNorm";
 
 template <Aidge::DimIdx_t DIM>
-Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op): OperatorTensor(op), Attributes_(op) {
+Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
     if (op.mImpl) {
         SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend());
     } else {
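Note a semantic consequence of these copy-constructor hunks that is easy to miss in review: because `mAttributes` is a `shared_ptr` copied by the copy-constructor, a copied operator now aliases the original's attributes instead of duplicating them. A minimal sketch (illustrative types, not the Aidge classes) of that behavior:

```cpp
// Two operator copies share one attribute object; mutation through one copy
// is visible through the other.
#include <iostream>
#include <memory>

struct Attrs { float epsilon; float momentum; };

struct Op {
    std::shared_ptr<Attrs> mAttributes;
    Op(float eps, float mom) : mAttributes(std::make_shared<Attrs>(Attrs{eps, mom})) {}
    Op(const Op& other) : mAttributes(other.mAttributes) {} // shared, not copied
    float& epsilon() const { return mAttributes->epsilon; }
};

int main() {
    Op a(1e-5f, 0.1f);
    Op b(a);               // b aliases a's attributes
    b.epsilon() = 1e-3f;
    std::cout << a.epsilon() << "\n"; // 0.001: both operators see the change
}
```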
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index ee06ce69b135e11fe3ed5be8fa9f501debb6acd5..af20b8e5ce878dcc3f1853b8de8d95972a9327f9 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -20,7 +20,7 @@
 void Aidge::Concat_OpImpl::forward() {
     const Concat_Op& op = dynamic_cast<const Concat_Op&>(mOp);
-    const DimSize_t axis = op.template getAttr<DimSize_t>("Axis");
+    const DimSize_t axis = op.axis();
 
     assert(op.getInput(0) && "missing input in Concat operator");
     DataType datatypeFirstInput = op.getInput(0)->dataType();
@@ -61,35 +61,47 @@ const std::string Aidge::Concat_Op::Type = "Concat";
 
 bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
     // Every input is non-empty with the same number of dimensions
-    bool associated = (getInput(0) != nullptr);
-    associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input
-    auto outputDims = getInput(0)->dims();
-    const auto firstInputNbDims = getInput(0) -> nbDims();
-    for (IOIndex_t i = 1; i < nbInputs(); ++i) {
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
         if (!getInput(i)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
         }
+    }
+    const std::size_t nbDimsInput0 = getInput(0)->nbDims();
+    if (nbDimsInput0 == 0) {
+        return false;
+    }
+    AIDGE_ASSERT(nbDimsInput0 > 0, "First input in {} Operator is empty", type());
+    for (IOIndex_t i = 1; i < nbInputs(); ++i) {
+        if (getInput(i)->nbDims() == 0) {
+            return false;
+        }
+        AIDGE_ASSERT(nbDimsInput0 == getInput(i)->nbDims(),
            "Input 0 and input {} in {} Operator have different number of dimensions: {} / {}",
            i, type(), nbDimsInput0, getInput(i)->nbDims());
+    }
 
-        if (getInput(i)->nbDims() == firstInputNbDims) {
-            for (DimSize_t dim = 0; dim < firstInputNbDims; ++dim) {
-                if (dim == getAttr<ConcatAttr::Axis>()) {
-                    outputDims[dim] += getInput(i)->dims()[dim];
-                }
-                else {
-                    associated &= (getInput(i)->dims()[dim] == outputDims[dim]);
-                }
+    // Check validity of attributes with inputs
+    // Axis
+    std::int32_t axis = mAttributes->template getAttr<ConcatAttr::Axis>();
+    axis = (axis < 0) ? axis + static_cast<std::int32_t>(nbDimsInput0) : axis;
+    AIDGE_ASSERT(((axis >= 0) && (axis < static_cast<std::int32_t>(nbDimsInput0))),
+        "'Axis' attribute not compatible with provided inputs.");
+    const std::size_t axis_u64 = static_cast<std::size_t>(axis);
+
+    // Check validity of inputs
+    auto outputDims = getInput(0)->dims();
+    for (IOIndex_t i = 1; i < nbInputs(); ++i) {
+        for (DimSize_t dim = 0; dim < nbDimsInput0; ++dim) {
+            if (dim == axis_u64) {
+                outputDims[axis_u64] += getInput(i)->dims()[axis_u64];
+            }
+            else {
+                AIDGE_ASSERT(getInput(i)->dims()[dim] == outputDims[dim],
                    "Incompatible dimensions between input 0 {} and input {} {}",
                    getInput(0)->dims(), i, getInput(i)->dims());
             }
         }
-        else {
-            associated = false;
-            break;
-        }
-    }
-    if (associated) {
-        getOutput(0)->resize(outputDims);
     }
-    return associated;
+    getOutput(0)->resize(outputDims);
+
+    return true;
 }
 
 void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
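The axis handling this hunk introduces (wrap a negative axis once by the rank, then validate, then accumulate along that axis) can be checked standalone. A sketch with illustrative names, not the Aidge API:

```cpp
// Concat shape inference: negative axis wraps by rank; non-axis dims must match.
#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<std::size_t> concatDims(const std::vector<std::vector<std::size_t>>& inputs,
                                    std::int32_t axis) {
    const std::int32_t rank = static_cast<std::int32_t>(inputs.front().size());
    if (axis < 0) axis += rank;                 // e.g. axis=-2 on rank 3 -> 1
    assert(axis >= 0 && axis < rank);
    std::vector<std::size_t> out = inputs.front();
    for (std::size_t i = 1; i < inputs.size(); ++i) {
        for (std::int32_t d = 0; d < rank; ++d) {
            if (d == axis) out[d] += inputs[i][d];
            else           assert(inputs[i][d] == out[d]); // non-axis dims must match
        }
    }
    return out;
}

int main() {
    auto out = concatDims({{2, 3, 4}, {2, 5, 4}}, -2);
    std::cout << out[0] << "x" << out[1] << "x" << out[2] << "\n"; // 2x8x4
}
```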
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 66e1d1f5b25c2b12f73a851d87d9f91aa4940322..9ca30b65c4e4d729d36bb790d692159ae4936b28 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -29,7 +29,7 @@ const std::string Aidge::Conv_Op<DIM>::Type = "Conv";
 template <Aidge::DimIdx_t DIM>
 Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
     : OperatorTensor(op),
-      Attributes_(op)
+      mAttributes(op.mAttributes)
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
@@ -57,21 +57,21 @@ bool Aidge::Conv_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
                     (getInput(0)->template dims<DIM+2>()[1] == inChannels()),
                     "Wrong input size for Conv operator.");
         // check optional bias
-        if(!this->template getAttr<ConvAttr::NoBias>())
+        if(!mAttributes->template getAttr<ConvAttr::NoBias>())
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == outChannels()),
                     "Wrong bias size for Conv operator.");
         std::array<DimSize_t, DIM + 2> outputDims{};
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
-        for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
-            const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
-                                                    (this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
+            const DimSize_t kernelExtent = mAttributes->template getAttr<ConvAttr::DilationDims>()[dim] *
+                                                    (mAttributes->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
                                             1;
 
             outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                     floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                            static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
+                            static_cast<float>(mAttributes->template getAttr<ConvAttr::StrideDims>()[dim])));
         }
 
         outputDims[1] = outChannels();
@@ -113,18 +113,18 @@ Aidge::Conv_Op<DIM>::computeReceptiveField(
         std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
         for (DimIdx_t i = 0; i < DIM; ++i) {
             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        * mAttributes->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
                         + 1
-                        + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-            inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
+                        + (mAttributes->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * mAttributes->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= mAttributes->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
         }
 
         // Weight
         // same output value, every input channel is used
         std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
         for (std::size_t i = 0; i < DIM; ++i) {
-            weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
+            weightDims.push_back(mAttributes->template getAttr<ConvAttr::KernelDims>()[i]);
         }
         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
         weightIdxDims[0] = firstEltDims[1];
@@ -135,7 +135,7 @@ Aidge::Conv_Op<DIM>::computeReceptiveField(
         res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
 
         // Bias
-        if (! this->template getAttr<ConvAttr::NoBias>()){
+        if (!mAttributes->template getAttr<ConvAttr::NoBias>()){
            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
            const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
            res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
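The convolution shape rule preserved by these Conv hunks (and reused unchanged by the ConvDepthWise hunks below) first widens the kernel by the dilation, then applies the strided floor division. A standalone sketch, names illustrative:

```cpp
// Conv output size: effective kernel extent grows with dilation before the
// stride division, i.e. out = 1 + floor((in - (d*(k-1)+1)) / s).
#include <cmath>
#include <cstddef>
#include <iostream>

std::size_t convDim(std::size_t in, std::size_t kernel,
                    std::size_t stride, std::size_t dilation) {
    const std::size_t extent = dilation * (kernel - 1) + 1; // effective kernel span
    return 1 + static_cast<std::size_t>(
        std::floor(static_cast<float>(in - extent) / static_cast<float>(stride)));
}

int main() {
    std::cout << convDim(32, 3, 1, 1) << "\n"; // 30: plain 3x3, stride 1
    std::cout << convDim(32, 3, 1, 2) << "\n"; // 28: dilation 2 spans 5 pixels
}
```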
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index 77441be414847c08452c71fc2e35c4e3e5bd3c04..4f19b87e4dcff092d8e40f4ea5271febdd5ac6d8 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -30,7 +30,7 @@ const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise";
 template <Aidge::DimIdx_t DIM>
 Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op)
     : OperatorTensor(op),
-      Attributes_(op)
+      mAttributes(op.mAttributes)
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
@@ -59,21 +59,21 @@ bool Aidge::ConvDepthWise_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
                     (getInput(0)->template dims<DIM+2>()[1] == nbChannels()),
                     "Wrong input size for Conv operator.");
         // check optional bias
-        if(!this->template getAttr<ConvDepthWiseAttr::NoBias>())
+        if(!mAttributes->template getAttr<ConvDepthWiseAttr::NoBias>())
             AIDGE_ASSERT((getInput(2)->nbDims() == (1)) &&
                     (getInput(2)->template dims<1>()[0] == nbChannels()),
                     "Wrong bias size for Conv operator.");
         std::array<DimSize_t, DIM + 2> outputDims = {};
         const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
 
-        for (std::size_t dim = 0; dim < this->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
-            const DimSize_t kernelExtent = this->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
-                                                    (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
+        for (std::size_t dim = 0; dim < mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>().size() ; ++dim) {
+            const DimSize_t kernelExtent = mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>()[dim] *
+                                                    (mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[dim] - 1) +
                                             1;
 
             outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                     floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
-                            static_cast<float>(this->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
+                            static_cast<float>(mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[dim])));
         }
 
         outputDims[1] = inputDims[1];
@@ -114,17 +114,17 @@ Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
         std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
         for (DimIdx_t i = 0; i < DIM; ++i) {
             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                        * mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
                         + 1
-                        + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-                        * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-            inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
+                        + (mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                        * mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+            inputIdxDims[2+i] *= mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
         }
 
         // Weight
         std::vector<DimSize_t> weightDims{outputDims[1], 1};
         for (std::size_t i = 0; i < DIM; ++i) {
-            weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
+            weightDims.push_back(mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
         }
         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
         weightIdxDims[0] = firstEltDims[1];
@@ -135,7 +135,7 @@ Aidge::ConvDepthWise_Op<DIM>::computeReceptiveField(
         res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
         res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
         // Bias
-        if (! this->template getAttr<ConvDepthWiseAttr::NoBias>()){
+        if (! mAttributes->template getAttr<ConvDepthWiseAttr::NoBias>()){
             const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
             const std::vector<DimSize_t> biasIdxDims{firstEltDims[1]};
             res.push_back(std::pair<std::vector<DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 1d53893b1e37933ef41540202b76fdcdfca08130..2a044c607cc8634d24b66af6ae7fa10acfe6e1c7 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -64,7 +64,7 @@ bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
                    nbInputFeatures, inChannels);
     }
     // check optional bias
-    if(!this->template getAttr<FCAttr::NoBias>())
+    if(!mAttributes->template getAttr<FCAttr::NoBias>())
         AIDGE_ASSERT((getInput(2)->nbDims() == 1) &&
             (getInput(2)->template dims<1>()[0] == outChannels),
             "Wrong bias size for FC operator.");
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index b0b9a0e84882cae55a9a3c336684d43e208cb503..4b7a6232cad7c1b9d9af1bd1bc710871d107a746 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -22,9 +22,8 @@
 void Aidge::Gather_OpImpl::forward() {
     const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
 
-    const auto axis = op.template getAttr<std::int8_t>("Axis");
-    const std::size_t axisIdx = static_cast<std::size_t>(axis) + (axis >= 0 ? 0 : op.getInput(0)->dims().size());
+    const std::size_t axisIdx = static_cast<std::size_t>(op.axis()) + (op.axis() >= 0 ? 0 : op.getInput(0)->dims().size());
 
     std::size_t postAxisElems = 1;
     for (std::size_t i = axisIdx + 1; i < op.getInput(0)->dims().size(); ++i) {
@@ -38,11 +37,11 @@ void Aidge::Gather_OpImpl::forward() {
     std::size_t outputOffset = 0;
     for (std::size_t i=0; i<preAxisElems; ++i)
     {
-        for(std::size_t j=0; j<op.template getAttr<std::vector<int64_t>>("Indices").size(); ++j)
+        for(std::size_t j = 0; j < op.indices().size(); ++j)
         {
-            const std::size_t idx = op.template getAttr<std::vector<int64_t>>("Indices")[j] >= 0 ?
-                                        static_cast<std::size_t>(op.template getAttr<std::vector<int64_t>>("Indices")[j]) :
-                                        static_cast<std::size_t>(op.template getAttr<std::vector<int64_t>>("Indices")[j] + static_cast<int>(op.getInput(0)->dims()[axisIdx]));
+            const std::size_t idx = op.indices()[j] >= 0 ?
+                                        static_cast<std::size_t>(op.indices()[j]) :
+                                        static_cast<std::size_t>(op.indices()[j] + static_cast<int>(op.getInput(0)->dims()[axisIdx]));
             op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i * postAxisElems * op.getInput(0)->dims()[axisIdx] + idx * postAxisElems), postAxisElems, outputOffset);
             outputOffset += postAxisElems;
         }
@@ -71,7 +70,7 @@ bool Aidge::Gather_Op::forwardDims(bool allowDataDependency) {
     }
 
     if (getInput(1) && !getInput(1)->empty()) {
-        if (!this->template getAttr<GatherAttr::Indices>().empty()) {
+        if (!mAttributes->template getAttr<GatherAttr::Indices>().empty()) {
             Log::notice("Gather_Op: ignoring non-empty Indices attribute because input#1 takes precedence");
         }
 
@@ -81,28 +80,28 @@ bool Aidge::Gather_Op::forwardDims(bool allowDataDependency) {
         }
 
         std::shared_ptr<Tensor> fallback;
-        this->template getAttr<GatherAttr::GatheredShape>() = getInput(1)->dims();
-        this->template getAttr<GatherAttr::Indices>().clear(); // If both are provided input would override attrs
-        this->template getAttr<GatherAttr::Indices>().reserve(getInput(1)->size());
+        mAttributes->template getAttr<GatherAttr::GatheredShape>() = getInput(1)->dims();
+        mAttributes->template getAttr<GatherAttr::Indices>().clear(); // If both are provided input would override attrs
+        mAttributes->template getAttr<GatherAttr::Indices>().reserve(getInput(1)->size());
         const auto& indices = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
         std::copy_n(static_cast<int64_t*>(indices.getImpl()->hostPtr()),
                     indices.size(),
-                    std::back_inserter(this->template getAttr<GatherAttr::Indices>()));
+                    std::back_inserter(mAttributes->template getAttr<GatherAttr::Indices>()));
     }
 
-    AIDGE_ASSERT(!this->template getAttr<GatherAttr::Indices>().empty(), "Missing input#1 or Indices attribute");
+    AIDGE_ASSERT(!mAttributes->template getAttr<GatherAttr::Indices>().empty(), "Missing input#1 or Indices attribute");
 
     std::vector<DimSize_t> outDims = getInput(0)->dims();
 
-    std::int8_t axisIdx = this->template getAttr<GatherAttr::Axis>()>=0?
-                            this->template getAttr<GatherAttr::Axis>():
-                            this->template getAttr<GatherAttr::Axis>()+outDims.size();
+    std::int8_t axisIdx = mAttributes->template getAttr<GatherAttr::Axis>()>=0?
+                            mAttributes->template getAttr<GatherAttr::Axis>():
+                            mAttributes->template getAttr<GatherAttr::Axis>()+outDims.size();
 
     outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
-    if( !this->template getAttr<GatherAttr::GatheredShape>().empty())
+    if( !mAttributes->template getAttr<GatherAttr::GatheredShape>().empty())
     {
         outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx),
-                    this->template getAttr<GatherAttr::GatheredShape>().begin(),
-                    this->template getAttr<GatherAttr::GatheredShape>().end());
+                    mAttributes->template getAttr<GatherAttr::GatheredShape>().begin(),
+                    mAttributes->template getAttr<GatherAttr::GatheredShape>().end());
     }
     mOutputs[0]->resize(outDims);
     return true;
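The index wrapping these Gather hunks keep relying on (a negative index is offset by the size of the gathered axis, exactly once) is easy to sanity-check in isolation. A sketch with illustrative names:

```cpp
// Gather index normalization: -1 addresses the last element of the axis.
#include <cstdint>
#include <cstddef>
#include <iostream>

std::size_t wrapIndex(std::int64_t idx, std::size_t axisSize) {
    return idx >= 0 ? static_cast<std::size_t>(idx)
                    : static_cast<std::size_t>(idx + static_cast<std::int64_t>(axisSize));
}

int main() {
    // On an axis of size 5, index -1 maps to 4; a non-negative index is unchanged.
    std::cout << wrapIndex(-1, 5) << " " << wrapIndex(3, 5) << "\n"; // 4 3
}
```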
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index e08b5f1054f07a9dcc1722d219ebce022f994d61..a055da29a2c2e603d28e64adb0e222fe569fbcf9 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -24,14 +24,13 @@ Aidge::Elts_t Aidge::Memorize_OpImpl::getNbRequiredData(
     Aidge::IOIndex_t inputIdx) const
 {
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
 
-    if (scheduleStep == 0 && inputIdx == 0) {
+    if (op.scheduleStep() == 0 && inputIdx == 0) {
         // No data input is required for the initial step.
         // Initialization data is required however.
         return Elts_t::NoneElts();
     }
-    else if (scheduleStep > 0 && inputIdx == 1) {
+    else if (op.scheduleStep() > 0 && inputIdx == 1) {
         // No initialization data is required after the initial step.
         return Elts_t::NoneElts();
     }
@@ -45,10 +44,8 @@ Aidge::Elts_t Aidge::Memorize_OpImpl::getRequiredMemory(const Aidge::IOIndex_t o
     assert(mOp.getRawOutput(outputIdx) && "requires valid output");
 
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
-    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
 
-    if (endStep > 0 && outputIdx == 1 && scheduleStep >= endStep) {
+    if ((op.endStep() > 0) && (outputIdx == 1) && (op.scheduleStep() >= op.endStep())) {
         return Elts_t::NoneElts();
     }
     else {
@@ -60,18 +57,15 @@ void Aidge::Memorize_OpImpl::updateConsummerProducer() {
     OperatorImpl::updateConsummerProducer();
 
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int scheduleStep = op.template getAttr<MemorizeAttr::ScheduleStep>();
-    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
-    AIDGE_ASSERT(endStep == 0 || scheduleStep <= endStep, "cannot update consumer producer anymore, number of cycles exceeded");
+    AIDGE_ASSERT(op.endStep() == 0 || op.scheduleStep() <= op.endStep(), "cannot update consumer producer anymore, number of cycles exceeded");
 }
 
 void Aidge::Memorize_OpImpl::forward() {
     const Memorize_Op& op = dynamic_cast<const Memorize_Op&>(mOp);
-    const unsigned int forwardStep = op.template getAttr<MemorizeAttr::ForwardStep>();
-    const unsigned int endStep = op.template getAttr<MemorizeAttr::EndStep>();
-    AIDGE_ASSERT(endStep == 0 || forwardStep <= endStep, "cannot forward anymore, number of cycles exceeded");
-    if (forwardStep == 0) {
+    AIDGE_ASSERT((op.endStep() == 0) || (op.forwardStep() <= op.endStep()), "cannot forward anymore, number of cycles exceeded");
+
+    if (op.forwardStep() == 0) {
         op.getOutput(0)->getImpl()->copy(op.getInput(1)->getImpl()->rawPtr(), op.getInput(1)->size());
     }
     else {
@@ -83,8 +77,8 @@ const std::string Aidge::Memorize_Op::Type = "Memorize";
 
 void Aidge::Memorize_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
-    ++this->template getAttr<MemorizeAttr::ScheduleStep>();
-    this->template getAttr<MemorizeAttr::ForwardStep>() = 0;
+    ++mAttributes->template getAttr<MemorizeAttr::ScheduleStep>();
+    mAttributes->template getAttr<MemorizeAttr::ForwardStep>() = 0;
 }
 
 bool Aidge::Memorize_Op::forwardDims(bool /*allowDataDependency*/) {
@@ -132,6 +126,6 @@ void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t
 void Aidge::Memorize_Op::forward() {
     Operator::forward();
 
-    ++this->template getAttr<MemorizeAttr::ForwardStep>();
-    this->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
+    ++mAttributes->template getAttr<MemorizeAttr::ForwardStep>();
+    mAttributes->template getAttr<MemorizeAttr::ScheduleStep>() = 0;
}
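The step bookkeeping these Memorize hunks refactor behind accessors can be summarized outside the scheduler. A rough sketch (illustrative, not the Aidge scheduler) of the invariant: advancing one counter resets the other, and both are bounded by `endStep` unless `endStep` is 0:

```cpp
// Memorize step counters: schedule and forward phases stay in lockstep.
#include <cassert>
#include <iostream>

struct MemorizeSteps {
    unsigned scheduleStep = 0, forwardStep = 0, endStep;
    explicit MemorizeSteps(unsigned end) : endStep(end) {}
    void updateConsummerProducer() {          // spelling matches the Aidge method name
        assert(endStep == 0 || scheduleStep <= endStep);
        ++scheduleStep; forwardStep = 0;
    }
    void forward() {
        assert(endStep == 0 || forwardStep <= endStep);
        ++forwardStep; scheduleStep = 0;
    }
};

int main() {
    MemorizeSteps m(3);
    m.updateConsummerProducer();
    m.forward();
    std::cout << m.forwardStep << " " << m.scheduleStep << "\n"; // 1 0
}
```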
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index 84d42c089baecdd78c35506a693b05a8ed728fd9..a05155085ed6a947807e741cf55c944f189fca51 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -157,6 +157,8 @@ void Aidge::OperatorTensor::setDataType(const DataType& dataType) const {
     }
 
     // Set data type for parameters inputs only (weights, bias...), which are usually Producers
+    // TODO: Fix -> if there is no parameter input connected (e.g. optional bias) then this function will fail.
+    // This behaviour should be decided in its own dedicated issue.
     for (IOIndex_t i = nbData(); i < nbInputs(); ++i) {
         AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
         getInput(i)->setDataType(dataType);
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index 18325d80a94f35878ededca839ec809000527c39..b4c4c7df82306240fcde6e0ae33271eca1420b4a 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -30,9 +30,9 @@ Aidge::Elts_t Aidge::Pop_OpImpl::getNbRequiredData(const Aidge::IOIndex_t inputI
 void Aidge::Pop_OpImpl::forward() {
     const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
+
     assert(op.getInput(0) && "missing input #0");
-    const unsigned int forwardStep = op.template getAttr<PopAttr::ForwardStep>();
-    *op.getOutput(0) = op.getInput(0)->extract({forwardStep});
+    *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()});
 }
 
 const std::string Aidge::Pop_Op::Type = "Pop";
@@ -54,7 +54,7 @@ bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
 
 void Aidge::Pop_Op::updateConsummerProducer() {
     Operator::updateConsummerProducer();
-    this->template getAttr<PopAttr::ForwardStep>() = 0;
+    mAttributes->template getAttr<PopAttr::ForwardStep>() = 0;
 }
 
 void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
@@ -69,5 +69,5 @@ void Aidge::Pop_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t devic
 void Aidge::Pop_Op::forward() {
     Operator::forward();
-    ++this->template getAttr<PopAttr::ForwardStep>();
+    ++mAttributes->template getAttr<PopAttr::ForwardStep>();
 }
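Conceptually, `Pop_OpImpl::forward()` emits slice `forwardStep` along the first (stacking) dimension of its input, which is what `extract({op.forwardStep()})` expresses. A toy sketch of that semantics, with the "tensor" reduced to a vector of time slices (illustrative names, not the Aidge API):

```cpp
// Pop semantics: at step t, output = input slice t along the first dimension.
#include <cstddef>
#include <iostream>
#include <vector>

std::vector<int> popSlice(const std::vector<std::vector<int>>& stacked, std::size_t step) {
    return stacked.at(step); // bounds-checked access, mirroring extract({forwardStep})
}

int main() {
    std::vector<std::vector<int>> seq{{1, 2}, {3, 4}, {5, 6}};
    auto s = popSlice(seq, 1);
    std::cout << s[0] << " " << s[1] << "\n"; // 3 4
}
```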
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 7059ea7e989d789b4cff0ed895fc2c5ec0ad81bc..2ac10993736fb679785a5bcb9b03189297e4feff 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -29,7 +29,8 @@ const std::string Aidge::Producer_Op::Type = "Producer";
 
 Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, bool constant)
     : OperatorTensor(Type, 0, 0, 1),
-      Attributes_(attr<ProdAttr::Constant>(constant))
+      mAttributes(std::make_shared<Attributes_>(
+        attr<ProdAttr::Constant>(constant)))
 {
     mOutputs[0] = tensor; // copy the pointer of the Tensor
     if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
@@ -47,7 +48,7 @@ Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, boo
  */
 Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
     : OperatorTensor(op),
-      Attributes_(op)
+      mAttributes(op.mAttributes)
 {
     mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0)));
     if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index 28e39b6d3387a0371c0505dc0a7b350e83a2bbaf..f8a8243b6d00bf14f8a48b1b897ec9d15dd2c9f7 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -32,7 +32,7 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
     }
     if (!getInput(0)->empty()) {
         // make Axes attribute positive
-        std::vector<std::int32_t>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
+        std::vector<std::int32_t>& axes = mAttributes->template getAttr<ReduceMeanAttr::Axes>();
         std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
             if (val < 0)
                 val+=static_cast<std::int32_t>(getInput(0)->nbDims());
@@ -41,7 +41,7 @@ bool Aidge::ReduceMean_Op::forwardDims(bool /*allowDataDependency*/) {
 
         // build output dimensions
         std::vector<DimSize_t> outDims = getInput(0)->dims();
-        if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
+        if (mAttributes->template getAttr<ReduceMeanAttr::KeepDims>()) {
             std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
         }
         else {
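The ReduceMean shape logic touched above (wrap negative axes by the rank, then either pin reduced dims to 1 or erase them) can be exercised standalone. A sketch with illustrative names:

```cpp
// ReduceMean output shape: KeepDims keeps reduced axes as size 1, otherwise
// they are removed from the output shape.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<std::size_t> reducedDims(std::vector<std::size_t> dims,
                                     std::vector<std::int32_t> axes, bool keepDims) {
    const auto rank = static_cast<std::int32_t>(dims.size());
    for (auto& a : axes) if (a < 0) a += rank;   // make axes positive
    if (keepDims) {
        for (auto a : axes) dims[static_cast<std::size_t>(a)] = 1;
        return dims;
    }
    std::vector<std::size_t> out;
    for (std::int32_t d = 0; d < rank; ++d)
        if (std::find(axes.begin(), axes.end(), d) == axes.end())
            out.push_back(dims[static_cast<std::size_t>(d)]);
    return out;
}

int main() {
    auto a = reducedDims({2, 3, 4}, {-1}, true);  // {2, 3, 1}
    auto b = reducedDims({2, 3, 4}, {-1}, false); // {2, 3}
    std::cout << a.size() << " " << b.size() << "\n"; // 3 2
}
```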
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index adbd5fae8a11bfc5009ed4b920d28624db71bb0d..cfdec080353b95e844fe38ecabadce8e6c290ec4 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -50,7 +50,7 @@ bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
     }
 
     if (getInput(1) && !getInput(1)->empty()) {
-        if (!this->template getAttr<ReshapeAttr::Shape>().empty()) {
+        if (!mAttributes->template getAttr<ReshapeAttr::Shape>().empty()) {
             Log::notice("Reshape_Op: ignoring non-empty Shape attribute because input#1 takes precedence");
         }
 
@@ -60,24 +60,24 @@ bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
         }
 
         std::shared_ptr<Tensor> fallback;
-        this->template getAttr<ReshapeAttr::Shape>().clear(); // If both are provided input would override attrs
-        this->template getAttr<ReshapeAttr::Shape>().reserve(getInput(1)->size());
+        mAttributes->template getAttr<ReshapeAttr::Shape>().clear(); // If both are provided input would override attrs
+        mAttributes->template getAttr<ReshapeAttr::Shape>().reserve(getInput(1)->size());
         const auto& shape = mInputs[1]->refCastFrom(fallback, NativeType<int64_t>::type, "cpu");
         std::copy_n(static_cast<int64_t*>(shape.getImpl()->hostPtr()),
                     shape.size(),
-                    std::back_inserter(this->template getAttr<ReshapeAttr::Shape>()));
+                    std::back_inserter(mAttributes->template getAttr<ReshapeAttr::Shape>()));
     }
 
-    AIDGE_ASSERT(!this->template getAttr<ReshapeAttr::Shape>().empty(), "Missing input#1 or Shape attribute");
+    AIDGE_ASSERT(!mAttributes->template getAttr<ReshapeAttr::Shape>().empty(), "Missing input#1 or Shape attribute");
 
     std::vector<DimSize_t> outDims;
     // variables to handle a negative dimension
     bool foundNegativeDimension = false;
     std::size_t outSize = 1;
     DimIdx_t negativeIndex = 0;
-    for(std::size_t i = 0; i < this->template getAttr<ReshapeAttr::Shape>().size(); ++i)
+    for(std::size_t i = 0; i < mAttributes->template getAttr<ReshapeAttr::Shape>().size(); ++i)
     {
-        int64_t dimSize = this->template getAttr<ReshapeAttr::Shape>()[i];
+        int64_t dimSize = mAttributes->template getAttr<ReshapeAttr::Shape>()[i];
         if (dimSize < 0) {
             if (foundNegativeDimension) {
                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator.");
@@ -86,7 +86,7 @@ bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
             dimSize = 1;
             negativeIndex = static_cast<DimIdx_t>(i);
         }
-        else if (dimSize == 0 && !this->template getAttr<ReshapeAttr::AllowZero>())
+        else if (dimSize == 0 && !mAttributes->template getAttr<ReshapeAttr::AllowZero>())
         {
             dimSize = getInput(0) -> dims()[i];
         }
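The shape-resolution rules this Reshape hunk preserves (at most one `-1` entry inferred from the element count; a `0` entry copies the input dimension unless `AllowZero` is set) can be checked in isolation. A sketch with illustrative names, not the Aidge API:

```cpp
// Reshape target-shape resolution with -1 inference and 0-copy semantics.
#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<std::size_t> resolveShape(const std::vector<std::size_t>& inDims,
                                      std::vector<std::int64_t> shape, bool allowZero) {
    std::size_t inSize = 1;
    for (auto d : inDims) inSize *= d;          // total element count
    std::size_t knownSize = 1;
    std::int64_t negIdx = -1;
    for (std::size_t i = 0; i < shape.size(); ++i) {
        if (shape[i] < 0) {
            assert(negIdx < 0 && "at most one -1 allowed");
            negIdx = static_cast<std::int64_t>(i);
        } else {
            if (shape[i] == 0 && !allowZero)    // 0 copies the input dimension
                shape[i] = static_cast<std::int64_t>(inDims[i]);
            knownSize *= static_cast<std::size_t>(shape[i]);
        }
    }
    if (negIdx >= 0)                            // infer the -1 entry
        shape[static_cast<std::size_t>(negIdx)] = static_cast<std::int64_t>(inSize / knownSize);
    return std::vector<std::size_t>(shape.begin(), shape.end());
}

int main() {
    auto out = resolveShape({2, 3, 4}, {0, -1}, false); // {2, 12}
    std::cout << out[0] << "x" << out[1] << "\n";
}
```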
attribute"); if (getInput(3) && !getInput(3)->empty()) { - if (!this->template getAttr<SliceAttr::Axes>().empty()) { + if (!mAttributes->template getAttr<SliceAttr::Axes>().empty()) { Log::notice("Slice_Op: ignoring non-empty Axes attribute because input#3 takes precedence"); } @@ -103,18 +103,18 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) { return false; } - this->template getAttr<SliceAttr::Axes>().clear(); // If both are provided input would override attrs - this->template getAttr<SliceAttr::Axes>().reserve(getInput(3)->size()); + mAttributes->template getAttr<SliceAttr::Axes>().clear(); // If both are provided input would override attrs + mAttributes->template getAttr<SliceAttr::Axes>().reserve(getInput(3)->size()); const auto& axes = getInput(3)->refCastFrom(fallback, NativeType<int8_t>::type, "cpu"); std::copy_n(static_cast<int8_t*>(axes.getImpl()->hostPtr()), axes.size(), - std::back_inserter(this->template getAttr<SliceAttr::Axes>())); + std::back_inserter(mAttributes->template getAttr<SliceAttr::Axes>())); } - AIDGE_ASSERT(!this->template getAttr<SliceAttr::Axes>().empty(), "Missing input#3 or Axes attribute"); + AIDGE_ASSERT(!mAttributes->template getAttr<SliceAttr::Axes>().empty(), "Missing input#3 or Axes attribute"); if (getInput(4) && !getInput(4)->empty()) { - if (!this->template getAttr<SliceAttr::Steps>().empty()) { + if (!mAttributes->template getAttr<SliceAttr::Steps>().empty()) { Log::notice("Slice_Op: ignoring non-empty Steps attribute because input#4 takes precedence"); } @@ -123,32 +123,32 @@ bool Aidge::Slice_Op::forwardDims(bool allowDataDependency) { return false; } - this->template getAttr<SliceAttr::Steps>().clear(); // If both are provided input would override attrs - this->template getAttr<SliceAttr::Steps>().reserve(getInput(4)->size()); + mAttributes->template getAttr<SliceAttr::Steps>().clear(); // If both are provided input would override attrs + mAttributes->template getAttr<SliceAttr::Steps>().reserve(getInput(4)->size()); const auto& steps = getInput(4)->refCastFrom(fallback, NativeType<int64_t>::type, "cpu"); std::copy_n(static_cast<int64_t*>(steps.getImpl()->hostPtr()), steps.size(), - std::back_inserter(this->template getAttr<SliceAttr::Steps>())); + std::back_inserter(mAttributes->template getAttr<SliceAttr::Steps>())); } // Fill Steps attr if empty - if(this->template getAttr<SliceAttr::Steps>().empty()) { + if(mAttributes->template getAttr<SliceAttr::Steps>().empty()) { // In case the input Steps is not provided, default value is 1 - this->template getAttr<SliceAttr::Steps>() = std::vector<std::int64_t>(this->template getAttr<SliceAttr::Axes>().size(), 1); + mAttributes->template getAttr<SliceAttr::Steps>() = std::vector<std::int64_t>(mAttributes->template getAttr<SliceAttr::Axes>().size(), 1); } - const DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size(); + const DimSize_t nbAxes = mAttributes->template getAttr<SliceAttr::Axes>().size(); std::vector<DimSize_t> outDims = getInput(0)->dims(); for (std::size_t i = 0; i < nbAxes; ++i) { - const DimIdx_t axis = this->template getAttr<SliceAttr::Axes>()[i] >= 0 ? - static_cast<DimIdx_t>(this->template getAttr<SliceAttr::Axes>()[i]) : - static_cast<DimIdx_t>(this->template getAttr<SliceAttr::Axes>()[i] + static_cast<DimIdx_t>(getInput(0)->nbDims())); - const DimSize_t start = this->template getAttr<SliceAttr::Starts>()[i] >= 0 ? 
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 7b20366576b16868af20947a2248ae3e2df85650..0b00c9b600b86764631090297ef1fe25f9212578 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -25,7 +25,7 @@
 void Aidge::TransposeImpl::forward() {
     const Transpose_Op& op = dynamic_cast<const Transpose_Op&>(mOp);
-    op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.getAttr<std::vector<DimSize_t>>(0));
+    op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.outputDimsOrder());
 }
 
 const std::string Aidge::Transpose_Op::Type = "Transpose";
@@ -37,7 +37,7 @@ bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
     }
 
     if (!getInput(0)->empty()) {
-        const auto& outDimsOrder = getAttr<std::vector<DimSize_t>>(0);
+        const auto& outDimsOrder = mAttributes->template getAttr<std::vector<DimSize_t>>(0);
         std::vector<DimSize_t> outputDims;
         for (std::size_t i = 0; i < outDimsOrder.size(); ++i) {
             outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);