diff --git a/include/aidge/backend/cuda/operator/AddImpl.hpp b/include/aidge/backend/cuda/operator/AddImpl.hpp
index cd1819753cd00a325443d9c9c992f3d2347bb377..429d6f1b04489d9e38ce96d584a1ce9528dd0b2d 100644
--- a/include/aidge/backend/cuda/operator/AddImpl.hpp
+++ b/include/aidge/backend/cuda/operator/AddImpl.hpp
@@ -27,30 +27,33 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class AddImpl_cuda : public OperatorImpl {
-private:
-
-
 public:
-    AddImpl_cuda(const Add_Op &op) : OperatorImpl(op, "cuda") {}
+    AddImpl_cuda(const Add_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<AddImpl_cuda> create(const Add_Op &op) {
+    static std::unique_ptr<AddImpl_cuda> create(const Add_Op& op) {
         return std::make_unique<AddImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    void backward();
-    // ~AddImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
+    void backward() override;
+
 private:
     template <class T> void forward_(const std::vector<Tensor>& inputs, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
     template <class T> void backward_(const Tensor& outGrad, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
 };
 
-namespace {
-// add cuda backend to Add_Op implementation registry
-static Registrar<Add_Op> registrarAddImpl_cuda("cuda", Aidge::AddImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Add_Op, "cuda", Aidge::AddImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_ADDIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/AndImpl.hpp b/include/aidge/backend/cuda/operator/AndImpl.hpp
index 7033f04fa5e5c8ae80b51b3679bd6b98b855ac1e..4105ec87db2c58e218c629a1c94f31efd37c80ee 100644
--- a/include/aidge/backend/cuda/operator/AndImpl.hpp
+++ b/include/aidge/backend/cuda/operator/AndImpl.hpp
@@ -27,27 +27,31 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class AndImpl_cuda : public OperatorImpl {
-private:
-
 public:
-    AndImpl_cuda(const And_Op &op) : OperatorImpl(op, "cuda") {}
+    AndImpl_cuda(const And_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<AndImpl_cuda> create(const And_Op &op) {
+    static std::unique_ptr<AndImpl_cuda> create(const And_Op& op) {
         return std::make_unique<AndImpl_cuda>(op);
     }
 
-public:
-    void forward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
 
 private:
     template <class T> void forward_(const std::vector<Tensor>& inputs, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
 };
 
-namespace {
-// add cuda backend to And_Op implementation registry
-static Registrar<And_Op> registrarAndImpl_cuda("cuda", Aidge::AndImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(And_Op, "cuda", Aidge::AndImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_ANDIMPL_H_ */
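[Editor's note] The pattern above repeats for every header in this patch: the hand-named static Registrar in an anonymous namespace becomes a single REGISTRAR invocation, and each implementation now advertises what it supports through getAvailableImplSpecs(). For illustration only, here is a minimal generic sketch of what such a registration macro boils down to (this is not Aidge's actual Registrar or REGISTRAR, which live in aidge_core): one static object whose constructor records the creator function under the backend key.

    #include <functional>
    #include <map>
    #include <string>

    // Sketch of a creator registry keyed by backend name ("cuda", "cpu", ...).
    // The Creator signature is deliberately simplified; the real one returns
    // the operator implementation object.
    template <class Op>
    struct Registrar {
        using Creator = std::function<void(const Op&)>;
        static std::map<std::string, Creator>& registry() {
            static std::map<std::string, Creator> r;  // one registry per operator type
            return r;
        }
        Registrar(const std::string& key, Creator c) {
            registry().emplace(key, std::move(c));  // runs during static initialization
        }
    };

    // A REGISTRAR-style macro then turns each registration into one line:
    #define REGISTRAR(OP, KEY, FUNC) \
        static const Registrar<OP> registrar_##OP(KEY, FUNC)

One visible consequence: since a macro of this kind pastes the operator type into an identifier, template instantiations such as Conv_Op<2> are first given plain-name aliases (Conv2D_Op, AvgPooling2D_Op, and so on) before being registered, as seen in the files below.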
diff --git a/include/aidge/backend/cuda/operator/ArgMaxImpl.hpp b/include/aidge/backend/cuda/operator/ArgMaxImpl.hpp
index b8158fe95015757eeb15f58bf5ae39171f1ca213..a89aebf96914f258f6be616b940ec195ec9ae2a9 100644
--- a/include/aidge/backend/cuda/operator/ArgMaxImpl.hpp
+++ b/include/aidge/backend/cuda/operator/ArgMaxImpl.hpp
@@ -27,30 +27,34 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class ArgMaxImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
-
-
 public:
-    ArgMaxImpl_cuda(const ArgMax_Op &op) : OperatorImpl(op, "cuda") {}
+    ArgMaxImpl_cuda(const ArgMax_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<ArgMaxImpl_cuda> create(const ArgMax_Op &op) {
+    static std::unique_ptr<ArgMaxImpl_cuda> create(const ArgMax_Op& op) {
         return std::make_unique<ArgMaxImpl_cuda>(op);
     }
 
-public:
-    void forward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
 
 private:
+    // CuDNN specific variables
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
+
     template <class T> void forward_(const Tensor& input, std::int32_t axis, DimSize_t selectLastIdx);
 };
 
-namespace {
-// add cuda backend to ArgMax_Op implementation registry
-static Registrar<ArgMax_Op> registrarArgMaxImpl_cuda("cuda", Aidge::ArgMaxImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(ArgMax_Op, "cuda", Aidge::ArgMaxImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_ARGMAXIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/AvgPoolingImpl.hpp b/include/aidge/backend/cuda/operator/AvgPoolingImpl.hpp
index 540ec574f9b5fbcea8b8f28e390cbe05f1e0fa8e..7f8fb4075affd3e5f17533ea67b051dbb6395f04 100644
--- a/include/aidge/backend/cuda/operator/AvgPoolingImpl.hpp
+++ b/include/aidge/backend/cuda/operator/AvgPoolingImpl.hpp
@@ -27,35 +27,41 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 template <DimIdx_t DIM>
 class AvgPoolingImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    cudnnPoolingDescriptor_t mAvgPoolingDesc = nullptr;
-    cudnnPoolingMode_t mMode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
-
 public:
-    AvgPoolingImpl_cuda(const AvgPooling_Op<DIM> &op) : OperatorImpl(op, "cuda") {}
+    AvgPoolingImpl_cuda(const AvgPooling_Op<DIM>& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<AvgPoolingImpl_cuda> create(const AvgPooling_Op<2> &op) {
+    static std::unique_ptr<AvgPoolingImpl_cuda> create(const AvgPooling_Op<DIM>& op) {
         return std::make_unique<AvgPoolingImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    void backward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
+    void backward() override;
     ~AvgPoolingImpl_cuda();
 
 private:
+    // CuDNN specific variables
+    cudnnPoolingDescriptor_t mAvgPoolingDesc = nullptr;
+    cudnnPoolingMode_t mMode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
+
     template <class T> void forward_(const Tensor& input);
     template <class T> void backward_(const Tensor& output_grad);
 };
 
-namespace {
-// add cuda backend to AvgPooling_Op<2> implementation registry
-static Registrar<AvgPooling_Op<2>> registrarAvgPoolingImpl_cuda("cuda", Aidge::AvgPoolingImpl_cuda<2>::create);
-} // namespace
+// Implementation entry point registration to Operator
+using AvgPooling2D_Op = AvgPooling_Op<2>;
+REGISTRAR(AvgPooling2D_Op, "cuda", Aidge::AvgPoolingImpl_cuda<2>::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_AVGPOOLINGIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/BatchNormImpl.hpp b/include/aidge/backend/cuda/operator/BatchNormImpl.hpp
index 3451d07f289371202570434f96546344c0c4fb26..5ba8656ef8a25ffa53584641a938f637ecff9b94 100644
--- a/include/aidge/backend/cuda/operator/BatchNormImpl.hpp
+++ b/include/aidge/backend/cuda/operator/BatchNormImpl.hpp
@@ -27,35 +27,41 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 template <DimIdx_t DIM>
 class BatchNormImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    cudnnTensorDescriptor_t mBNDesc = nullptr;
-    cudnnBatchNormMode_t mMode;
-    double mEpsilon;
-
 public:
-    BatchNormImpl_cuda(const BatchNorm_Op<DIM> &op) : OperatorImpl(op, "cuda") {}
+    BatchNormImpl_cuda(const BatchNorm_Op<DIM>& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<BatchNormImpl_cuda> create(const BatchNorm_Op<DIM> &op) {
+    static std::unique_ptr<BatchNormImpl_cuda> create(const BatchNorm_Op<DIM>& op) {
         return std::make_unique<BatchNormImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    void backward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
+    void backward() override;
     ~BatchNormImpl_cuda();
 
 private:
+    // CuDNN specific variables
+    cudnnTensorDescriptor_t mBNDesc = nullptr;
+    cudnnBatchNormMode_t mMode;
+    double mEpsilon;
+
     template <class T> void forward_(const Tensor& input0, const Tensor& input1, const Tensor& input2, const Tensor& input3, const Tensor& input4);
     template <class T> void backward_(const Tensor& input0, const Tensor& input1, const Tensor& input2);
 };
 
-namespace {
-// add cuda backend to BatchNorm_Op<2> implementation registry
-static Registrar<BatchNorm_Op<2>> registrarBatchNormImpl_cuda("cuda", Aidge::BatchNormImpl_cuda<2>::create);
-} // namespace
+// Implementation entry point registration to Operator
+using BatchNorm2D_Op = BatchNorm_Op<2>;
+REGISTRAR(BatchNorm2D_Op, "cuda", Aidge::BatchNormImpl_cuda<2>::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_BATCHNORMIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/ConvImpl.hpp b/include/aidge/backend/cuda/operator/ConvImpl.hpp
index 458acaf89a4edc59e881ee5eb38502cf849ca341..ce94ec6695735c93d5c8d0acfdc6153e91e7147d 100644
--- a/include/aidge/backend/cuda/operator/ConvImpl.hpp
+++ b/include/aidge/backend/cuda/operator/ConvImpl.hpp
@@ -29,8 +29,30 @@
 
 namespace Aidge {
 
+// Operator implementation entry point for the backend
 template <DimIdx_t DIM>
 class ConvImpl_cuda : public OperatorImpl {
+public:
+    ConvImpl_cuda(const Operator& op, bool depthWise = false) : OperatorImpl(op, "cuda"), mDepthWise(depthWise) {}
+
+    static std::unique_ptr<ConvImpl_cuda<DIM>> create(const Conv_Op<DIM>& op) {
+        return std::make_unique<ConvImpl_cuda<DIM>>(op);
+    }
+
+    static std::unique_ptr<ConvImpl_cuda<DIM>> createDW(const ConvDepthWise_Op<DIM>& op) {
+        return std::make_unique<ConvImpl_cuda<DIM>>(op, true);
+    }
+
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Any}
+        };
+    }
+
+    void forward() override;
+    void backward() override;
+    ~ConvImpl_cuda();
+
 private:
     // CuDNN specific variables
     cudnnConvolutionDescriptor_t mConvDesc = nullptr;
@@ -46,31 +68,15 @@ private:
     std::shared_ptr<Tensor> mInput2Fallback;
     bool mDepthWise = false;
 
-public:
-    ConvImpl_cuda(const Operator&op, bool depthWise = false) : OperatorImpl(op, "cuda"), mDepthWise(depthWise) {}
-
-    static std::unique_ptr<ConvImpl_cuda> create(const Conv_Op<DIM> &op) {
-        return std::make_unique<ConvImpl_cuda>(op);
-    }
-
-    static std::unique_ptr<ConvImpl_cuda> createDW(const ConvDepthWise_Op<DIM> &op) {
-        return std::make_unique<ConvImpl_cuda>(op, true);
-    }
-
-public:
-    void forward();
-    void backward();
-    ~ConvImpl_cuda();
-
-private:
     template <class T> void forward_(const Tensor& input0, const Tensor& input1, const Tensor& input2);
     template <class T> void backward_(const Tensor& input0, const Tensor& input1, const Tensor& input2);
 };
 
-namespace {
-static Registrar<Conv_Op<2>> registrarConvImpl_cuda("cuda", Aidge::ConvImpl_cuda<2>::create);
-static Registrar<ConvDepthWise_Op<2>> registrarConvDepthWiseImpl_cuda("cuda", Aidge::ConvImpl_cuda<2>::createDW);
-} // namespace
+// Implementation entry point registration to Operator
+using Conv2D_Op = Conv_Op<2>;
+using ConvDepthWise2D_Op = ConvDepthWise_Op<2>;
+REGISTRAR(Conv2D_Op, "cuda", Aidge::ConvImpl_cuda<2>::create);
+REGISTRAR(ConvDepthWise2D_Op, "cuda", Aidge::ConvImpl_cuda<2>::createDW);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_CONVIMPL_H_ */
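[Editor's note] ConvImpl_cuda is the one class in this patch that serves two operator types: regular and depthwise convolution share all state, and only the depthWise flag set by the factory differs. A hypothetical call site (convOp and convDwOp are illustrative placeholders, not names from this patch):

    // Hypothetical usage; convOp / convDwOp stand in for real Conv_Op<2> /
    // ConvDepthWise_Op<2> instances.
    auto convImpl = Aidge::ConvImpl_cuda<2>::create(convOp);      // mDepthWise == false
    auto dwImpl   = Aidge::ConvImpl_cuda<2>::createDW(convDwOp);  // mDepthWise == true

Registering createDW under its own ConvDepthWise2D_Op key keeps the registry at one creator per operator type while reusing the shared implementation.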
diff --git a/include/aidge/backend/cuda/operator/DivImpl.hpp b/include/aidge/backend/cuda/operator/DivImpl.hpp
index 4e8b5ed6b38c64a3a9d9aa0f34664032cd602813..4b15445cb791aa1cf2520018d1015e19aaf10ce3 100644
--- a/include/aidge/backend/cuda/operator/DivImpl.hpp
+++ b/include/aidge/backend/cuda/operator/DivImpl.hpp
@@ -27,30 +27,33 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class DivImpl_cuda : public OperatorImpl {
-private:
-
-
 public:
-    DivImpl_cuda(const Div_Op &op) : OperatorImpl(op, "cuda") {}
+    DivImpl_cuda(const Div_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<DivImpl_cuda> create(const Div_Op &op) {
+    static std::unique_ptr<DivImpl_cuda> create(const Div_Op& op) {
         return std::make_unique<DivImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    void backward();
-    // ~DivImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
+    void backward() override;
+
 private:
     template <class T> void forward_(const std::vector<Tensor>& inputs, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
     template <class T> void backward_(const Tensor& outGrad);
 };
 
-namespace {
-// add cuda backend to Div_Op implementation registry
-static Registrar<Div_Op> registrarDivImpl_cuda("cuda", Aidge::DivImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Div_Op, "cuda", Aidge::DivImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_DIVIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/FCImpl.hpp b/include/aidge/backend/cuda/operator/FCImpl.hpp
index 46f7849d1f17aab5496bdbde013ef078ad1f5a7c..f2dd0c90c0096a1b57fb6860e5991d0c1e824be9 100644
--- a/include/aidge/backend/cuda/operator/FCImpl.hpp
+++ b/include/aidge/backend/cuda/operator/FCImpl.hpp
@@ -27,34 +27,37 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class FCImpl_cuda : public OperatorImpl {
-private:
-    std::shared_ptr<Tensor> mInput0Fallback;
-    std::shared_ptr<Tensor> mInput1Fallback;
-    std::shared_ptr<Tensor> mInput2Fallback;
-
-
 public:
-    FCImpl_cuda(const FC_Op &op) : OperatorImpl(op, "cuda") {}
+    FCImpl_cuda(const FC_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<FCImpl_cuda> create(const FC_Op &op) {
+    static std::unique_ptr<FCImpl_cuda> create(const FC_Op& op) {
         return std::make_unique<FCImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    void backward();
-    // ~FCImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
+    void backward() override;
 
 private:
+    std::shared_ptr<Tensor> mInput0Fallback;
+    std::shared_ptr<Tensor> mInput1Fallback;
+    std::shared_ptr<Tensor> mInput2Fallback;
+
     template <class T> void forward_(const Tensor& input0, const Tensor& input1, const Tensor& input2, std::size_t outChannels);
     template <class T> void backward_(const Tensor& input0, const Tensor& input1, const Tensor& input2, std::size_t outChannels);
 };
 
-namespace {
-// add cuda backend to FC_Op implementation registry
-static Registrar<FC_Op> registrarFCImpl_cuda("cuda", Aidge::FCImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(FC_Op, "cuda", Aidge::FCImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_FCIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/GlobalAveragePoolingImpl.hpp b/include/aidge/backend/cuda/operator/GlobalAveragePoolingImpl.hpp
index 6e0fad5c01efb6474f527dee0bfbfdc594788bc6..3f0386dcfa68d4b55bebeb524dfedfd5edeb0fe9 100644
--- a/include/aidge/backend/cuda/operator/GlobalAveragePoolingImpl.hpp
+++ b/include/aidge/backend/cuda/operator/GlobalAveragePoolingImpl.hpp
@@ -27,34 +27,37 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class GlobalAveragePoolingImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    cudnnPoolingDescriptor_t mGlobalAveragePoolingDesc = nullptr;
-    cudnnPoolingMode_t mMode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
-
 public:
-    GlobalAveragePoolingImpl_cuda(const GlobalAveragePooling_Op &op) : OperatorImpl(op, "cuda") {}
+    GlobalAveragePoolingImpl_cuda(const GlobalAveragePooling_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<GlobalAveragePoolingImpl_cuda> create(const GlobalAveragePooling_Op &op) {
+    static std::unique_ptr<GlobalAveragePoolingImpl_cuda> create(const GlobalAveragePooling_Op& op) {
         return std::make_unique<GlobalAveragePoolingImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    void backward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Any}
+        };
+    }
+
+    void forward() override;
+    void backward() override;
     ~GlobalAveragePoolingImpl_cuda();
 
 private:
+    // CuDNN specific variables
+    cudnnPoolingDescriptor_t mGlobalAveragePoolingDesc = nullptr;
+    cudnnPoolingMode_t mMode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
+
     template <class T> void forward_(const Tensor& input);
     template <class T> void backward_(const Tensor& output_grad);
 };
 
-namespace {
-// add cuda backend to GlobalAveragePooling_Op implementation registry
-static Registrar<GlobalAveragePooling_Op> registrarGlobalAveragePoolingImpl_cuda("cuda", Aidge::GlobalAveragePoolingImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(GlobalAveragePooling_Op, "cuda", Aidge::GlobalAveragePoolingImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_GLOBALAVERAGEPOOLINGIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/LnImpl.hpp b/include/aidge/backend/cuda/operator/LnImpl.hpp
index 442d8905b722e99bd16c219403f87c9dd1258548..1617754fbf5dd52e099a9787a25a827851933af9 100644
--- a/include/aidge/backend/cuda/operator/LnImpl.hpp
+++ b/include/aidge/backend/cuda/operator/LnImpl.hpp
@@ -27,32 +27,36 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class LnImpl_cuda : public OperatorImpl {
-private:
-    std::shared_ptr<Tensor> mInputFallback;
-    std::shared_ptr<Tensor> mOutputGradFallback;
-
 public:
-    LnImpl_cuda(const Ln_Op &op) : OperatorImpl(op, "cuda") {}
+    LnImpl_cuda(const Ln_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<LnImpl_cuda> create(const Ln_Op &op) {
+    static std::unique_ptr<LnImpl_cuda> create(const Ln_Op& op) {
         return std::make_unique<LnImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    void backward();
-    // ~LnImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
+    void backward() override;
 
 private:
+    std::shared_ptr<Tensor> mInputFallback;
+    std::shared_ptr<Tensor> mOutputGradFallback;
+
     template <class T> void forward_(const Tensor& input);
     template <class T> void backward_(const Tensor& output_grad);
 };
 
-namespace {
-// add cuda backend to Ln_Op implementation registry
-static Registrar<Ln_Op> registrarLnImpl_cuda("cuda", Aidge::LnImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Ln_Op, "cuda", Aidge::LnImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_LNIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/MaxPoolingImpl.hpp b/include/aidge/backend/cuda/operator/MaxPoolingImpl.hpp
index db7f1e376013db52aeb1b27f8cc3ff192c7f0629..a203e761beaeccec96b36bbd5a424a193cdb6387 100644
--- a/include/aidge/backend/cuda/operator/MaxPoolingImpl.hpp
+++ b/include/aidge/backend/cuda/operator/MaxPoolingImpl.hpp
@@ -27,35 +27,39 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 template <DimIdx_t DIM>
 class MaxPoolingImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    cudnnPoolingDescriptor_t mMaxPoolingDesc = nullptr;
-    cudnnPoolingMode_t mMode = CUDNN_POOLING_MAX;
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
-
 public:
-    MaxPoolingImpl_cuda(const MaxPooling_Op<DIM> &op) : OperatorImpl(op, "cuda") {}
+    MaxPoolingImpl_cuda(const MaxPooling_Op<DIM>& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<MaxPoolingImpl_cuda> create(const MaxPooling_Op<2> &op) {
+    static std::unique_ptr<MaxPoolingImpl_cuda> create(const MaxPooling_Op<DIM>& op) {
         return std::make_unique<MaxPoolingImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    void backward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Any}
+        };
+    }
+
+    void forward() override;
+    void backward() override;
     ~MaxPoolingImpl_cuda();
 
 private:
+    // CuDNN specific variables
+    cudnnPoolingDescriptor_t mMaxPoolingDesc = nullptr;
+    cudnnPoolingMode_t mMode = CUDNN_POOLING_MAX;
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
+
     template <class T> void forward_(const Tensor& input);
     template <class T> void backward_(const Tensor& output_grad);
 };
 
-namespace {
-// add cuda backend to MaxPooling_Op<2> implementation registry
-static Registrar<MaxPooling_Op<2>> registrarMaxPoolingImpl_cuda("cuda", Aidge::MaxPoolingImpl_cuda<2>::create);
-} // namespace
+// Implementation entry point registration to Operator
+using MaxPooling2D_Op = MaxPooling_Op<2>;
+REGISTRAR(MaxPooling2D_Op, "cuda", Aidge::MaxPoolingImpl_cuda<2>::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_MAXPOOLINGIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/MulImpl.hpp b/include/aidge/backend/cuda/operator/MulImpl.hpp
index 828a299655e854c823d1abcdd29fc1ef5b4da0cd..37d3d5a0df7b63dc63ad13737d8a8b463bf315c8 100644
--- a/include/aidge/backend/cuda/operator/MulImpl.hpp
+++ b/include/aidge/backend/cuda/operator/MulImpl.hpp
@@ -27,29 +27,33 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class MulImpl_cuda : public OperatorImpl {
-private:
-
 public:
-    MulImpl_cuda(const Mul_Op &op) : OperatorImpl(op, "cuda") {}
+    MulImpl_cuda(const Mul_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<MulImpl_cuda> create(const Mul_Op &op) {
+    static std::unique_ptr<MulImpl_cuda> create(const Mul_Op& op) {
         return std::make_unique<MulImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    void backward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
+    void backward() override;
 
 private:
     template <class T> void forward_(const std::vector<Tensor>& inputs, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
     template <class T> void backward_(const Tensor& outputGrad, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
 };
 
-namespace {
-// add cuda backend to Mul_Op implementation registry
-static Registrar<Mul_Op> registrarMulImpl_cuda("cuda", Aidge::MulImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Mul_Op, "cuda", Aidge::MulImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_MULIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/PadImpl.hpp b/include/aidge/backend/cuda/operator/PadImpl.hpp
index 4452d3408e7b4780c1e5c4ea6553ba0b713df231..d51361d6ee5a3ec9a858d290b3f5fe5251b6fa97 100644
--- a/include/aidge/backend/cuda/operator/PadImpl.hpp
+++ b/include/aidge/backend/cuda/operator/PadImpl.hpp
@@ -27,35 +27,41 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 template <DimIdx_t DIM>
 class PadImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
-    int mLeftPad, mTopPad;
-    double mPadVal;
-    unsigned int mPadType;
-
 public:
-    PadImpl_cuda(const Pad_Op<DIM> &op) : OperatorImpl(op, "cuda") {}
+    PadImpl_cuda(const Pad_Op<DIM>& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<PadImpl_cuda> create(const Pad_Op<2> &op) {
+    static std::unique_ptr<PadImpl_cuda> create(const Pad_Op<DIM>& op) {
         return std::make_unique<PadImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    void backward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
+    void backward() override;
 
 private:
+    // CuDNN specific variables
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
+    int mLeftPad, mTopPad;
+    double mPadVal;
+    unsigned int mPadType;
+
     template <class T> void forward_(const Tensor& input);
     template <class T> void backward_(const Tensor& outGrad);
 };
 
-namespace {
-// add cuda backend to Pad_Op<2> implementation registry
-static Registrar<Pad_Op<2>> registrarPadImpl_cuda("cuda", Aidge::PadImpl_cuda<2>::create);
-} // namespace
+// Implementation entry point registration to Operator
+using Pad2D_Op = Pad_Op<2>;
+REGISTRAR(Pad2D_Op, "cuda", Aidge::PadImpl_cuda<2>::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_PADIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/PowImpl.hpp b/include/aidge/backend/cuda/operator/PowImpl.hpp
index cbaf6e0a6bd7f084538f1e49b2046ff6abd5b533..403648d9a294ee598f117c8b05e6f0875e998307 100644
--- a/include/aidge/backend/cuda/operator/PowImpl.hpp
+++ b/include/aidge/backend/cuda/operator/PowImpl.hpp
@@ -27,30 +27,33 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class PowImpl_cuda : public OperatorImpl {
-private:
-
-
 public:
-    PowImpl_cuda(const Pow_Op &op) : OperatorImpl(op, "cuda") {}
+    PowImpl_cuda(const Pow_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<PowImpl_cuda> create(const Pow_Op &op) {
+    static std::unique_ptr<PowImpl_cuda> create(const Pow_Op& op) {
         return std::make_unique<PowImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    void backward();
-    // ~PowImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
+    void backward() override;
+
 private:
     template <class T> void forward_(const std::vector<Tensor>& inputs, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
     template <class T> void backward_(const Tensor& outGrad);
 };
 
-namespace {
-// add cuda backend to Pow_Op implementation registry
-static Registrar<Pow_Op> registrarPowImpl_cuda("cuda", Aidge::PowImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Pow_Op, "cuda", Aidge::PowImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_POWIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/ReLUImpl.hpp b/include/aidge/backend/cuda/operator/ReLUImpl.hpp
index 285713f460b9d5b5e868c0c07ab23804f30dd694..344923ba1ee08642a3e3e5f685bfd2c7de8a74b4 100644
--- a/include/aidge/backend/cuda/operator/ReLUImpl.hpp
+++ b/include/aidge/backend/cuda/operator/ReLUImpl.hpp
@@ -27,7 +27,25 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class ReLUImpl_cuda : public OperatorImpl {
+public:
+    ReLUImpl_cuda(const ReLU_Op& op) : OperatorImpl(op, "cuda") {}
+
+    static std::unique_ptr<ReLUImpl_cuda> create(const ReLU_Op& op) {
+        return std::make_unique<ReLUImpl_cuda>(op);
+    }
+
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Any}
+        };
+    }
+
+    void forward() override;
+    void backward() override;
+    ~ReLUImpl_cuda();
+
 private:
     // CuDNN specific variables
 #if CUDNN_VERSION >= 5000
@@ -38,27 +56,12 @@ private:
     std::shared_ptr<Tensor> mInputFallback;
     std::shared_ptr<Tensor> mOutputGradFallback;
 
-public:
-    ReLUImpl_cuda(const ReLU_Op &op) : OperatorImpl(op, "cuda") {}
-
-    static std::unique_ptr<ReLUImpl_cuda> create(const ReLU_Op &op) {
-        return std::make_unique<ReLUImpl_cuda>(op);
-    }
-
-public:
-    void forward();
-    void backward();
-    ~ReLUImpl_cuda();
-
-private:
     template <class T> void forward_(const Tensor& input);
     template <class T> void backward_(const Tensor& output_grad);
 };
 
-namespace {
-// add cuda backend to ReLU_Op implementation registry
-static Registrar<ReLU_Op> registrarReLUImpl_cuda("cuda", Aidge::ReLUImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(ReLU_Op, "cuda", Aidge::ReLUImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_RELUIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/ReduceMeanImpl.hpp b/include/aidge/backend/cuda/operator/ReduceMeanImpl.hpp
index 923699ce83caca20cfc4600edc8b39246d9c4692..a50ff21b35f0b062c6a9c327ea2892c15055a175 100644
--- a/include/aidge/backend/cuda/operator/ReduceMeanImpl.hpp
+++ b/include/aidge/backend/cuda/operator/ReduceMeanImpl.hpp
@@ -27,33 +27,36 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class ReduceMeanImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
-
-
 public:
-    ReduceMeanImpl_cuda(const ReduceMean_Op &op) : OperatorImpl(op, "cuda") {}
+    ReduceMeanImpl_cuda(const ReduceMean_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<ReduceMeanImpl_cuda> create(const ReduceMean_Op &op) {
+    static std::unique_ptr<ReduceMeanImpl_cuda> create(const ReduceMean_Op& op) {
         return std::make_unique<ReduceMeanImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    void backward();
-    // ~ReduceMeanImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
+    void backward() override;
 
 private:
+    // CuDNN specific variables
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
+
     template <class T> void forward_(const Tensor& input, const std::vector<int>& axes, bool keepDims);
     template <class T> void backward_(const Tensor& output_grad, const std::vector<int>& axes);
 };
 
-namespace {
-// add cuda backend to ReduceMean_Op implementation registry
-static Registrar<ReduceMean_Op> registrarReduceMeanImpl_cuda("cuda", Aidge::ReduceMeanImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(ReduceMean_Op, "cuda", Aidge::ReduceMeanImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_REDUCEMEANIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/ReduceSumImpl.hpp b/include/aidge/backend/cuda/operator/ReduceSumImpl.hpp
index fd6c04616f7f73b34d81c6ee7b49ca936ccd7714..a5a7ae48d7e5bd8f370964d7f81795ecbaa5986b 100644
--- a/include/aidge/backend/cuda/operator/ReduceSumImpl.hpp
+++ b/include/aidge/backend/cuda/operator/ReduceSumImpl.hpp
@@ -27,32 +27,36 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class ReduceSumImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
-
-
 public:
-    ReduceSumImpl_cuda(const ReduceSum_Op &op) : OperatorImpl(op, "cuda") {}
+    ReduceSumImpl_cuda(const ReduceSum_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<ReduceSumImpl_cuda> create(const ReduceSum_Op &op) {
+    static std::unique_ptr<ReduceSumImpl_cuda> create(const ReduceSum_Op& op) {
         return std::make_unique<ReduceSumImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    void backward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
+    void backward() override;
 
 private:
+    // CuDNN specific variables
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
+
     template <class T> void forward_(const Tensor& input, const std::vector<int>& axes, bool keepDims);
     template <class T> void backward_(const Tensor& output_grad, const std::vector<int>& axes);
 };
 
-namespace {
-// add cuda backend to ReduceSum_Op implementation registry
-static Registrar<ReduceSum_Op> registrarReduceSumImpl_cuda("cuda", Aidge::ReduceSumImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(ReduceSum_Op, "cuda", Aidge::ReduceSumImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_REDUCESUMIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/ReshapeImpl.hpp b/include/aidge/backend/cuda/operator/ReshapeImpl.hpp
index 7b43df680bef115310669f0d55f2f78ef4fe9fa6..d412590c63f925806973038d67ee18e0847f79c2 100644
--- a/include/aidge/backend/cuda/operator/ReshapeImpl.hpp
+++ b/include/aidge/backend/cuda/operator/ReshapeImpl.hpp
@@ -27,27 +27,32 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class ReshapeImpl_cuda : public OperatorImpl {
-private:
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
-
 public:
-    ReshapeImpl_cuda(const Reshape_Op &op) : OperatorImpl(op, "cuda") {}
+    ReshapeImpl_cuda(const Reshape_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<ReshapeImpl_cuda> create(const Reshape_Op &op) {
+    static std::unique_ptr<ReshapeImpl_cuda> create(const Reshape_Op& op) {
         return std::make_unique<ReshapeImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    void backward();
-    ~ReshapeImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
+    void backward() override;
+
+private:
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
 };
 
-namespace {
-// add cuda backend to Reshape_Op implementation registry
-static Registrar<Reshape_Op> registrarReshapeImpl_cuda("cuda", Aidge::ReshapeImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Reshape_Op, "cuda", Aidge::ReshapeImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_RESHAPEIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/ShiftGELUImpl.hpp b/include/aidge/backend/cuda/operator/ShiftGELUImpl.hpp
index c4c6dc6eb57261dd230c023722a131b8858f5951..6eee6c12ce5d4efaa4dbec3f99dc35951c8087eb 100644
--- a/include/aidge/backend/cuda/operator/ShiftGELUImpl.hpp
+++ b/include/aidge/backend/cuda/operator/ShiftGELUImpl.hpp
@@ -29,29 +29,33 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class ShiftGELUImpl_cuda : public OperatorImpl {
-private:
-    std::shared_ptr<Tensor> mInputFallback;
 public:
-    ShiftGELUImpl_cuda(const ShiftGELU_Op &op) : OperatorImpl(op, "cuda") {}
+    ShiftGELUImpl_cuda(const ShiftGELU_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<ShiftGELUImpl_cuda> create(const ShiftGELU_Op &op) {
+    static std::unique_ptr<ShiftGELUImpl_cuda> create(const ShiftGELU_Op& op) {
         return std::make_unique<ShiftGELUImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    //~ShiftGELUImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
 
 private:
+    std::shared_ptr<Tensor> mInputFallback;
+
     template <class T> void forward_(const Tensor& input);
-
 };
 
-namespace {
-// add cuda backend to ShiftGELU_Op implementation registry
-static Registrar<ShiftGELU_Op> registrarShiftGELUImpl_cuda("cuda", Aidge::ShiftGELUImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(ShiftGELU_Op, "cuda", Aidge::ShiftGELUImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_SHIFTGELUIMPL_H_ */
\ No newline at end of file
diff --git a/include/aidge/backend/cuda/operator/ShiftMaxImpl.hpp b/include/aidge/backend/cuda/operator/ShiftMaxImpl.hpp
index 8d72ba0b15cb3d9a91eedab2c2eab1758d0ee00f..bce533158e3a8fffdf798a07df5cc9735a836fa8 100644
--- a/include/aidge/backend/cuda/operator/ShiftMaxImpl.hpp
+++ b/include/aidge/backend/cuda/operator/ShiftMaxImpl.hpp
@@ -29,29 +29,33 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class ShiftMaxImpl_cuda : public OperatorImpl {
-private:
-    std::shared_ptr<Tensor> mInputFallback;
 public:
-    ShiftMaxImpl_cuda(const ShiftMax_Op &op) : OperatorImpl(op, "cuda") {}
+    ShiftMaxImpl_cuda(const ShiftMax_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<ShiftMaxImpl_cuda> create(const ShiftMax_Op &op) {
+    static std::unique_ptr<ShiftMaxImpl_cuda> create(const ShiftMax_Op& op) {
         return std::make_unique<ShiftMaxImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    //~ShiftMaxImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
 
 private:
+    std::shared_ptr<Tensor> mInputFallback;
+
     template <class T> void forward_(const Tensor& input);
-
 };
 
-namespace {
-// add cuda backend to ShiftMax_Op implementation registry
-static Registrar<ShiftMax_Op> registrarShiftMaxImpl_cuda("cuda", Aidge::ShiftMaxImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(ShiftMax_Op, "cuda", Aidge::ShiftMaxImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_SHIFTMAXIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/SigmoidImpl.hpp b/include/aidge/backend/cuda/operator/SigmoidImpl.hpp
index 90dbb717732ad788b868fdc95eb55579a5e0b9f6..bc29b9e5f53716641a692cd63c29f4600f3cdd02 100644
--- a/include/aidge/backend/cuda/operator/SigmoidImpl.hpp
+++ b/include/aidge/backend/cuda/operator/SigmoidImpl.hpp
@@ -27,7 +27,25 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class SigmoidImpl_cuda : public OperatorImpl {
+public:
+    SigmoidImpl_cuda(const Sigmoid_Op& op) : OperatorImpl(op, "cuda") {}
+
+    static std::unique_ptr<SigmoidImpl_cuda> create(const Sigmoid_Op& op) {
+        return std::make_unique<SigmoidImpl_cuda>(op);
+    }
+
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Any}
+        };
+    }
+
+    void forward() override;
+    void backward() override;
+    ~SigmoidImpl_cuda();
+
 private:
     // CuDNN specific variables
 #if CUDNN_VERSION >= 5000
@@ -38,27 +56,12 @@ private:
     std::shared_ptr<Tensor> mInputFallback;
     std::shared_ptr<Tensor> mOutputGradFallback;
 
-public:
-    SigmoidImpl_cuda(const Sigmoid_Op &op) : OperatorImpl(op, "cuda") {}
-
-    static std::unique_ptr<SigmoidImpl_cuda> create(const Sigmoid_Op &op) {
-        return std::make_unique<SigmoidImpl_cuda>(op);
-    }
-
-public:
-    void forward();
-    void backward();
-    ~SigmoidImpl_cuda();
-
-private:
     template <class T> void forward_(const Tensor& input);
     template <class T> void backward_(const Tensor& output_grad);
 };
 
-namespace {
-// add cuda backend to Sigmoid_Op implementation registry
-static Registrar<Sigmoid_Op> registrarSigmoidImpl_cuda("cuda", Aidge::SigmoidImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Sigmoid_Op, "cuda", Aidge::SigmoidImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_SIGMOIDIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/SubImpl.hpp b/include/aidge/backend/cuda/operator/SubImpl.hpp
index fd1a76692abdf16b9854b90f535f68329ae5877a..45c833f3e7f9f25258469a4d1e34e8598df068ef 100644
--- a/include/aidge/backend/cuda/operator/SubImpl.hpp
+++ b/include/aidge/backend/cuda/operator/SubImpl.hpp
@@ -27,30 +27,33 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class SubImpl_cuda : public OperatorImpl {
-private:
-
-
 public:
-    SubImpl_cuda(const Sub_Op &op) : OperatorImpl(op, "cuda") {}
+    SubImpl_cuda(const Sub_Op& op) : OperatorImpl(op, "cuda") {}
 
-    static std::unique_ptr<SubImpl_cuda> create(const Sub_Op &op) {
+    static std::unique_ptr<SubImpl_cuda> create(const Sub_Op& op) {
         return std::make_unique<SubImpl_cuda>(op);
     }
 
-public:
-    void forward();
-    void backward();
-    // ~SubImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+
+    void forward() override;
+    void backward() override;
+
private:
     template <class T> void forward_(const std::vector<Tensor>& inputs, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
     template <class T> void backward_(const Tensor& outGrad, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
 };
 
-namespace {
-// add cuda backend to Sub_Op implementation registry
-static Registrar<Sub_Op> registrarSubImpl_cuda("cuda", Aidge::SubImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Sub_Op, "cuda", Aidge::SubImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_SUBIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/TanhImpl.hpp b/include/aidge/backend/cuda/operator/TanhImpl.hpp
index 35e879513fee0ec9354edecefd3d53860e54a0b1..166acd6adee397a3f284363a9db1e71152467b94 100644
--- a/include/aidge/backend/cuda/operator/TanhImpl.hpp
+++ b/include/aidge/backend/cuda/operator/TanhImpl.hpp
@@ -27,7 +27,25 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
 class TanhImpl_cuda : public OperatorImpl {
+public:
+    TanhImpl_cuda(const Tanh_Op& op) : OperatorImpl(op, "cuda") {}
+
+    static std::unique_ptr<TanhImpl_cuda> create(const Tanh_Op& op) {
+        return std::make_unique<TanhImpl_cuda>(op);
+    }
+
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Any}
+        };
+    }
+
+    void forward() override;
+    void backward() override;
+    ~TanhImpl_cuda();
+
 private:
     // CuDNN specific variables
 #if CUDNN_VERSION >= 5000
@@ -38,27 +56,12 @@ private:
     std::shared_ptr<Tensor> mInputFallback;
     std::shared_ptr<Tensor> mOutputGradFallback;
 
-public:
-    TanhImpl_cuda(const Tanh_Op &op) : OperatorImpl(op, "cuda") {}
-
-    static std::unique_ptr<TanhImpl_cuda> create(const Tanh_Op &op) {
-        return std::make_unique<TanhImpl_cuda>(op);
-    }
-
-public:
-    void forward();
-    void backward();
-    ~TanhImpl_cuda();
-
-private:
     template <class T> void forward_(const Tensor& input);
     template <class T> void backward_(const Tensor& output_grad);
 };
 
-namespace {
-// add cuda backend to Tanh_Op implementation registry
-static Registrar<Tanh_Op> registrarTanhImpl_cuda("cuda", Aidge::TanhImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Tanh_Op, "cuda", Aidge::TanhImpl_cuda::create);
 } // namespace Aidge
 
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_TANHIMPL_H_ */
diff --git a/src/operator/ReshapeImpl.cpp b/src/operator/ReshapeImpl.cpp
index 8016a5a9d1dfc26454af2cb03b6fe573820245f5..783e244057b0fc42a782fd363c3a99aa6d73b46b 100644
--- a/src/operator/ReshapeImpl.cpp
+++ b/src/operator/ReshapeImpl.cpp
@@ -39,8 +39,3 @@ void Aidge::ReshapeImpl_cuda::backward() {
 
     std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->grad() -> getImpl() -> setRawPtr(output_grad.getImpl()->rawPtr(), output_grad.getImpl()->size());
 }
-
-Aidge::ReshapeImpl_cuda::~ReshapeImpl_cuda() {
-
-}
-
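[Editor's note] On the recurring getAvailableImplSpecs() override: the element-wise implementations in this patch list Float64/Float32/Float16 explicitly, while several cuDNN-descriptor-based ones (Conv, MaxPooling, GlobalAveragePooling, ReLU, Sigmoid, Tanh) advertise DataType::Any. A self-contained sketch of the assumed matching rule follows; the enum and function below are illustrative, not Aidge source:

    #include <set>

    enum class DataType { Any, Float64, Float32, Float16 };

    // Assumed matching rule: an implementation advertising DataType::Any
    // accepts every requested type; otherwise the requested type must
    // appear explicitly in the advertised set.
    bool matches(const std::set<DataType>& available, DataType requested) {
        return available.count(DataType::Any) > 0 || available.count(requested) > 0;
    }

The point of the addition is that the framework can check compatibility up front and select a suitable implementation, rather than failing at kernel dispatch time.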