Commit 2ef7b553 authored by Olivier BICHLER

Adaptation to backend_export

parent 63aa1b0e
2 merge requests: !38 version 0.3.0, !35 Refactor OperatorImpl for backend/export
Pipeline #55465 passed
Showing changed files with 365 additions and 283 deletions
@@ -27,30 +27,33 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 class AddImpl_cuda : public OperatorImpl {
-private:
 public:
-    AddImpl_cuda(const Add_Op &op) : OperatorImpl(op, "cuda") {}
+    AddImpl_cuda(const Add_Op& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<AddImpl_cuda> create(const Add_Op &op) {
+    static std::unique_ptr<AddImpl_cuda> create(const Add_Op& op) {
         return std::make_unique<AddImpl_cuda>(op);
     }
-public:
-    void forward();
-    void backward();
-    // ~AddImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
+    void backward() override;
 private:
     template <class T> void forward_(const std::vector<Tensor>& inputs, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
     template <class T> void backward_(const Tensor& outGrad, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
 };
-namespace {
-// add cuda backend to Add_Op implementation registry
-static Registrar<Add_Op> registrarAddImpl_cuda("cuda", Aidge::AddImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Add_Op, "cuda", Aidge::AddImpl_cuda::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_ADDIMPL_H_ */
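The recurring change in every header of this commit is visible in this first file: the hand-written anonymous-namespace Registrar block is replaced by a single REGISTRAR line, and each implementation now advertises its supported specs. The following self-contained sketch shows one plausible mechanism behind Registrar and REGISTRAR; it is an assumption for illustration, not the actual Aidge definition.

// Hypothetical sketch only: a per-operator registry keyed by backend name,
// and a REGISTRAR macro that expands to the static Registrar object the old
// code spelled out by hand inside an anonymous namespace.
#include <functional>
#include <map>
#include <memory>
#include <string>

struct OperatorImpl { virtual ~OperatorImpl() = default; };
struct Add_Op {};  // stand-in for the real operator class

template <class Op>
struct Registrar {
    using Creator = std::function<std::unique_ptr<OperatorImpl>(const Op&)>;
    static std::map<std::string, Creator>& registry() {
        static std::map<std::string, Creator> r;  // one registry per Op type
        return r;
    }
    Registrar(const std::string& backend, Creator c) { registry()[backend] = std::move(c); }
};

// Assumed expansion: a static Registrar object with a pasted-together name.
#define REGISTRAR(Op, backend, creator) \
    namespace { static Registrar<Op> registrar_##Op(backend, creator); }

struct AddImpl_mock : OperatorImpl {};
REGISTRAR(Add_Op, "cuda",
          [](const Add_Op&) { return std::make_unique<AddImpl_mock>(); })

int main() {
    Add_Op op;
    auto impl = Registrar<Add_Op>::registry().at("cuda")(op);  // backend lookup + creation
    return impl ? 0 : 1;
}

Under this reading, the macro makes the registration entry point uniform across backends, which is presumably what the "backend_export" adaptation standardizes.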
@@ -27,27 +27,31 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 class AndImpl_cuda : public OperatorImpl {
-private:
 public:
-    AndImpl_cuda(const And_Op &op) : OperatorImpl(op, "cuda") {}
+    AndImpl_cuda(const And_Op& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<AndImpl_cuda> create(const And_Op &op) {
+    static std::unique_ptr<AndImpl_cuda> create(const And_Op& op) {
         return std::make_unique<AndImpl_cuda>(op);
     }
-public:
-    void forward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
 private:
     template <class T> void forward_(const std::vector<Tensor>& inputs, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
 };
-namespace {
-// add cuda backend to And_Op implementation registry
-static Registrar<And_Op> registrarAndImpl_cuda("cuda", Aidge::AndImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(And_Op, "cuda", Aidge::AndImpl_cuda::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_ANDIMPL_H_ */
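getAvailableImplSpecs() is the other recurring addition. Most operators in this commit list the three float types explicitly, while a few cuDNN-backed ones further down (Conv, GlobalAveragePooling, MaxPooling, ReLU) return {DataType::Any}. The sketch below shows assumed matching semantics, with Any acting as a wildcard; the real Aidge ImplSpec carries more than a data type.

// Minimal sketch of spec matching (assumed semantics, not the Aidge matcher).
#include <algorithm>
#include <set>

enum class DataType { Any, Float64, Float32, Float16 };

struct ImplSpec {
    DataType type;
    bool operator<(const ImplSpec& other) const { return type < other.type; }
};

// Does this backend implementation accept a tensor of the requested type?
bool supports(const std::set<ImplSpec>& available, DataType requested) {
    return std::any_of(available.begin(), available.end(),
                       [&](const ImplSpec& spec) {
                           return spec.type == DataType::Any || spec.type == requested;
                       });
}

int main() {
    const std::set<ImplSpec> andSpecs  = {{DataType::Float64}, {DataType::Float32}, {DataType::Float16}};
    const std::set<ImplSpec> convSpecs = {{DataType::Any}};
    const bool ok = supports(andSpecs, DataType::Float32) && supports(convSpecs, DataType::Float16);
    return ok ? 0 : 1;
}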
@@ -27,30 +27,34 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 class ArgMaxImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
 public:
-    ArgMaxImpl_cuda(const ArgMax_Op &op) : OperatorImpl(op, "cuda") {}
+    ArgMaxImpl_cuda(const ArgMax_Op& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<ArgMaxImpl_cuda> create(const ArgMax_Op &op) {
+    static std::unique_ptr<ArgMaxImpl_cuda> create(const ArgMax_Op& op) {
         return std::make_unique<ArgMaxImpl_cuda>(op);
     }
-public:
-    void forward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
 private:
+    // CuDNN specific variables
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
     template <class T> void forward_(const Tensor& input, std::int32_t axis, DimSize_t selectLastIdx);
 };
-namespace {
-// add cuda backend to ArgMax_Op implementation registry
-static Registrar<ArgMax_Op> registrarArgMaxImpl_cuda("cuda", Aidge::ArgMaxImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(ArgMax_Op, "cuda", Aidge::ArgMaxImpl_cuda::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_ARGMAXIMPL_H_ */
@@ -27,35 +27,41 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 template <DimIdx_t DIM>
 class AvgPoolingImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    cudnnPoolingDescriptor_t mAvgPoolingDesc = nullptr;
-    cudnnPoolingMode_t mMode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
 public:
-    AvgPoolingImpl_cuda(const AvgPooling_Op<DIM> &op) : OperatorImpl(op, "cuda") {}
+    AvgPoolingImpl_cuda(const AvgPooling_Op<DIM>& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<AvgPoolingImpl_cuda> create(const AvgPooling_Op<2> &op) {
+    static std::unique_ptr<AvgPoolingImpl_cuda> create(const AvgPooling_Op<DIM>& op) {
         return std::make_unique<AvgPoolingImpl_cuda>(op);
     }
-public:
-    void forward();
-    void backward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
+    void backward() override;
     ~AvgPoolingImpl_cuda();
 private:
+    // CuDNN specific variables
+    cudnnPoolingDescriptor_t mAvgPoolingDesc = nullptr;
+    cudnnPoolingMode_t mMode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
     template <class T> void forward_(const Tensor& input);
     template <class T> void backward_(const Tensor& output_grad);
 };
-namespace {
-// add cuda backend to AvgPooling_Op<2> implementation registry
-static Registrar<AvgPooling_Op<2>> registrarAvgPoolingImpl_cuda("cuda", Aidge::AvgPoolingImpl_cuda<2>::create);
-} // namespace
+// Implementation entry point registration to Operator
+using AvgPooling2D_Op = AvgPooling_Op<2>;
+REGISTRAR(AvgPooling2D_Op, "cuda", Aidge::AvgPoolingImpl_cuda<2>::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_AVGPOOLINGIMPL_H_ */
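Two details worth noting in this hunk. First, a genuine fix: create() previously took AvgPooling_Op<2> regardless of the class's DIM parameter and now takes AvgPooling_Op<DIM>. Second, the systematic addition of the override specifier to forward()/backward(). That addition is behavior-neutral here, but it turns any future signature drift against OperatorImpl into a compile error instead of a silently hidden function. A standalone illustration, not taken from the commit:

// What "override" buys (assumed rationale for the commit-wide change).
struct OperatorImpl {
    virtual ~OperatorImpl() = default;
    virtual void forward() {}
};

struct SomeImpl_cuda : OperatorImpl {
    void forward() override {}          // checked: matches the base signature
    // void forward() const override;   // would not compile: nothing to override
};

int main() {
    SomeImpl_cuda impl;
    OperatorImpl& base = impl;
    base.forward();                     // dispatches to SomeImpl_cuda::forward
}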
@@ -27,35 +27,41 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 template <DimIdx_t DIM>
 class BatchNormImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    cudnnTensorDescriptor_t mBNDesc = nullptr;
-    cudnnBatchNormMode_t mMode;
-    double mEpsilon;
 public:
-    BatchNormImpl_cuda(const BatchNorm_Op<DIM> &op) : OperatorImpl(op, "cuda") {}
+    BatchNormImpl_cuda(const BatchNorm_Op<DIM>& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<BatchNormImpl_cuda> create(const BatchNorm_Op<DIM> &op) {
+    static std::unique_ptr<BatchNormImpl_cuda> create(const BatchNorm_Op<DIM>& op) {
         return std::make_unique<BatchNormImpl_cuda>(op);
     }
-public:
-    void forward();
-    void backward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
+    void backward() override;
     ~BatchNormImpl_cuda();
 private:
+    // CuDNN specific variables
+    cudnnTensorDescriptor_t mBNDesc = nullptr;
+    cudnnBatchNormMode_t mMode;
+    double mEpsilon;
     template <class T> void forward_(const Tensor& input0, const Tensor& input1, const Tensor& input2, const Tensor& input3, const Tensor& input4);
     template <class T> void backward_(const Tensor& input0, const Tensor& input1, const Tensor& input2);
 };
-namespace {
-// add cuda backend to BatchNorm_Op<2> implementation registry
-static Registrar<BatchNorm_Op<2>> registrarBatchNormImpl_cuda("cuda", Aidge::BatchNormImpl_cuda<2>::create);
-} // namespace
+// Implementation entry point registration to Operator
+using BatchNorm2D_Op = BatchNorm_Op<2>;
+REGISTRAR(BatchNorm2D_Op, "cuda", Aidge::BatchNormImpl_cuda<2>::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_BATCHNORMIMPL_H_ */
@@ -29,8 +29,30 @@
 namespace Aidge {
+// Operator implementation entry point for the backend
 template <DimIdx_t DIM>
 class ConvImpl_cuda : public OperatorImpl {
+public:
+    ConvImpl_cuda(const Operator& op, bool depthWise = false) : OperatorImpl(op, "cuda"), mDepthWise(depthWise) {}
+    static std::unique_ptr<ConvImpl_cuda<DIM>> create(const Conv_Op<DIM>& op) {
+        return std::make_unique<ConvImpl_cuda<DIM>>(op);
+    }
+    static std::unique_ptr<ConvImpl_cuda<DIM>> createDW(const ConvDepthWise_Op<DIM>& op) {
+        return std::make_unique<ConvImpl_cuda<DIM>>(op, true);
+    }
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Any}
+        };
+    }
+    void forward() override;
+    void backward() override;
+    ~ConvImpl_cuda();
 private:
     // CuDNN specific variables
     cudnnConvolutionDescriptor_t mConvDesc = nullptr;
@@ -46,31 +68,15 @@ private:
     std::shared_ptr<Tensor> mInput2Fallback;
     bool mDepthWise = false;
-public:
-    ConvImpl_cuda(const Operator& op, bool depthWise = false) : OperatorImpl(op, "cuda"), mDepthWise(depthWise) {}
-    static std::unique_ptr<ConvImpl_cuda> create(const Conv_Op<DIM> &op) {
-        return std::make_unique<ConvImpl_cuda>(op);
-    }
-    static std::unique_ptr<ConvImpl_cuda> createDW(const ConvDepthWise_Op<DIM> &op) {
-        return std::make_unique<ConvImpl_cuda>(op, true);
-    }
-public:
-    void forward();
-    void backward();
-    ~ConvImpl_cuda();
-private:
     template <class T> void forward_(const Tensor& input0, const Tensor& input1, const Tensor& input2);
     template <class T> void backward_(const Tensor& input0, const Tensor& input1, const Tensor& input2);
 };
-namespace {
-static Registrar<Conv_Op<2>> registrarConvImpl_cuda("cuda", Aidge::ConvImpl_cuda<2>::create);
-static Registrar<ConvDepthWise_Op<2>> registrarConvDepthWiseImpl_cuda("cuda", Aidge::ConvImpl_cuda<2>::createDW);
-} // namespace
+// Implementation entry point registration to Operator
+using Conv2D_Op = Conv_Op<2>;
+using ConvDepthWise2D_Op = ConvDepthWise_Op<2>;
+REGISTRAR(Conv2D_Op, "cuda", Aidge::ConvImpl_cuda<2>::create);
+REGISTRAR(ConvDepthWise2D_Op, "cuda", Aidge::ConvImpl_cuda<2>::createDW);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_CONVIMPL_H_ */
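The templated files (Conv, and likewise AvgPooling, BatchNorm, MaxPooling, Pad) introduce a using alias before each REGISTRAR call. A plausible reason, inferred rather than stated in the commit: if the macro builds a unique variable name by token-pasting its first argument, a template instantiation such as Conv_Op<2> cannot appear there, while a plain identifier can. A hypothetical illustration of the constraint:

// Why an identifier-pasting macro needs an alias for template instantiations.
#define MAKE_VAR(T) static int var_for_##T = 0;

template <int N> struct Conv_Op {};
using Conv2D_Op = Conv_Op<2>;

// MAKE_VAR(Conv_Op<2>)   // would expand to "int var_for_Conv_Op<2>": ill-formed
MAKE_VAR(Conv2D_Op)       // expands to "int var_for_Conv2D_Op": fine

int main() { return var_for_Conv2D_Op; }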
@@ -27,30 +27,33 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 class DivImpl_cuda : public OperatorImpl {
-private:
 public:
-    DivImpl_cuda(const Div_Op &op) : OperatorImpl(op, "cuda") {}
+    DivImpl_cuda(const Div_Op& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<DivImpl_cuda> create(const Div_Op &op) {
+    static std::unique_ptr<DivImpl_cuda> create(const Div_Op& op) {
         return std::make_unique<DivImpl_cuda>(op);
     }
-public:
-    void forward();
-    void backward();
-    // ~DivImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
+    void backward() override;
 private:
     template <class T> void forward_(const std::vector<Tensor>& inputs, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
     template <class T> void backward_(const Tensor& outGrad);
 };
-namespace {
-// add cuda backend to Div_Op implementation registry
-static Registrar<Div_Op> registrarDivImpl_cuda("cuda", Aidge::DivImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Div_Op, "cuda", Aidge::DivImpl_cuda::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_DIVIMPL_H_ */
@@ -27,34 +27,37 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 class FCImpl_cuda : public OperatorImpl {
-private:
-    std::shared_ptr<Tensor> mInput0Fallback;
-    std::shared_ptr<Tensor> mInput1Fallback;
-    std::shared_ptr<Tensor> mInput2Fallback;
 public:
-    FCImpl_cuda(const FC_Op &op) : OperatorImpl(op, "cuda") {}
+    FCImpl_cuda(const FC_Op& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<FCImpl_cuda> create(const FC_Op &op) {
+    static std::unique_ptr<FCImpl_cuda> create(const FC_Op& op) {
         return std::make_unique<FCImpl_cuda>(op);
     }
-public:
-    void forward();
-    void backward();
-    // ~FCImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
+    void backward() override;
 private:
+    std::shared_ptr<Tensor> mInput0Fallback;
+    std::shared_ptr<Tensor> mInput1Fallback;
+    std::shared_ptr<Tensor> mInput2Fallback;
     template <class T> void forward_(const Tensor& input0, const Tensor& input1, const Tensor& input2, std::size_t outChannels);
     template <class T> void backward_(const Tensor& input0, const Tensor& input1, const Tensor& input2, std::size_t outChannels);
 };
-namespace {
-// add cuda backend to FC_Op implementation registry
-static Registrar<FC_Op> registrarFCImpl_cuda("cuda", Aidge::FCImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(FC_Op, "cuda", Aidge::FCImpl_cuda::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_FCIMPL_H_ */
@@ -27,34 +27,37 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 class GlobalAveragePoolingImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    cudnnPoolingDescriptor_t mGlobalAveragePoolingDesc = nullptr;
-    cudnnPoolingMode_t mMode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
 public:
-    GlobalAveragePoolingImpl_cuda(const GlobalAveragePooling_Op &op) : OperatorImpl(op, "cuda") {}
+    GlobalAveragePoolingImpl_cuda(const GlobalAveragePooling_Op& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<GlobalAveragePoolingImpl_cuda> create(const GlobalAveragePooling_Op &op) {
+    static std::unique_ptr<GlobalAveragePoolingImpl_cuda> create(const GlobalAveragePooling_Op& op) {
         return std::make_unique<GlobalAveragePoolingImpl_cuda>(op);
     }
-public:
-    void forward();
-    void backward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Any}
+        };
+    }
+    void forward() override;
+    void backward() override;
     ~GlobalAveragePoolingImpl_cuda();
 private:
+    // CuDNN specific variables
+    cudnnPoolingDescriptor_t mGlobalAveragePoolingDesc = nullptr;
+    cudnnPoolingMode_t mMode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
     template <class T> void forward_(const Tensor& input);
     template <class T> void backward_(const Tensor& output_grad);
 };
-namespace {
-// add cuda backend to GlobalAveragePooling_Op implementation registry
-static Registrar<GlobalAveragePooling_Op> registrarGlobalAveragePoolingImpl_cuda("cuda", Aidge::GlobalAveragePoolingImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(GlobalAveragePooling_Op, "cuda", Aidge::GlobalAveragePoolingImpl_cuda::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_GLOBALAVERAGEPOOLINGIMPL_H_ */
@@ -27,32 +27,36 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 class LnImpl_cuda : public OperatorImpl {
-private:
-    std::shared_ptr<Tensor> mInputFallback;
-    std::shared_ptr<Tensor> mOutputGradFallback;
 public:
-    LnImpl_cuda(const Ln_Op &op) : OperatorImpl(op, "cuda") {}
+    LnImpl_cuda(const Ln_Op& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<LnImpl_cuda> create(const Ln_Op &op) {
+    static std::unique_ptr<LnImpl_cuda> create(const Ln_Op& op) {
         return std::make_unique<LnImpl_cuda>(op);
     }
-public:
-    void forward();
-    void backward();
-    // ~LnImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
+    void backward() override;
 private:
+    std::shared_ptr<Tensor> mInputFallback;
+    std::shared_ptr<Tensor> mOutputGradFallback;
     template <class T> void forward_(const Tensor& input);
     template <class T> void backward_(const Tensor& output_grad);
 };
-namespace {
-// add cuda backend to Ln_Op implementation registry
-static Registrar<Ln_Op> registrarLnImpl_cuda("cuda", Aidge::LnImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Ln_Op, "cuda", Aidge::LnImpl_cuda::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_LNIMPL_H_ */
@@ -27,35 +27,39 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 template <DimIdx_t DIM>
 class MaxPoolingImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    cudnnPoolingDescriptor_t mMaxPoolingDesc = nullptr;
-    cudnnPoolingMode_t mMode = CUDNN_POOLING_MAX;
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
 public:
-    MaxPoolingImpl_cuda(const MaxPooling_Op<DIM> &op) : OperatorImpl(op, "cuda") {}
+    MaxPoolingImpl_cuda(const MaxPooling_Op<DIM>& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<MaxPoolingImpl_cuda> create(const MaxPooling_Op<2> &op) {
+    static std::unique_ptr<MaxPoolingImpl_cuda> create(const MaxPooling_Op<DIM>& op) {
         return std::make_unique<MaxPoolingImpl_cuda>(op);
     }
-public:
-    void forward();
-    void backward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Any}
+        };
+    }
+    void forward() override;
+    void backward() override;
     ~MaxPoolingImpl_cuda();
 private:
+    // CuDNN specific variables
+    cudnnPoolingDescriptor_t mMaxPoolingDesc = nullptr;
+    cudnnPoolingMode_t mMode = CUDNN_POOLING_MAX;
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
     template <class T> void forward_(const Tensor& input);
     template <class T> void backward_(const Tensor& output_grad);
 };
-namespace {
-// add cuda backend to MaxPooling_Op<2> implementation registry
-static Registrar<MaxPooling_Op<2>> registrarMaxPoolingImpl_cuda("cuda", Aidge::MaxPoolingImpl_cuda<2>::create);
-} // namespace
+// Implementation entry point registration to Operator
+using MaxPooling2D_Op = MaxPooling_Op<2>;
+REGISTRAR(MaxPooling2D_Op, "cuda", Aidge::MaxPoolingImpl_cuda<2>::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_MAXPOOLINGIMPL_H_ */
@@ -27,29 +27,33 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 class MulImpl_cuda : public OperatorImpl {
-private:
 public:
-    MulImpl_cuda(const Mul_Op &op) : OperatorImpl(op, "cuda") {}
+    MulImpl_cuda(const Mul_Op& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<MulImpl_cuda> create(const Mul_Op &op) {
+    static std::unique_ptr<MulImpl_cuda> create(const Mul_Op& op) {
         return std::make_unique<MulImpl_cuda>(op);
     }
-public:
-    void forward();
-    void backward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
+    void backward() override;
 private:
     template <class T> void forward_(const std::vector<Tensor>& inputs, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
     template <class T> void backward_(const Tensor& outputGrad, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
 };
-namespace {
-// add cuda backend to Mul_Op implementation registry
-static Registrar<Mul_Op> registrarMulImpl_cuda("cuda", Aidge::MulImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Mul_Op, "cuda", Aidge::MulImpl_cuda::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_MULIMPL_H_ */
@@ -27,35 +27,41 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 template <DimIdx_t DIM>
 class PadImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
-    int mLeftPad, mTopPad;
-    double mPadVal;
-    unsigned int mPadType;
 public:
-    PadImpl_cuda(const Pad_Op<DIM> &op) : OperatorImpl(op, "cuda") {}
+    PadImpl_cuda(const Pad_Op<DIM>& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<PadImpl_cuda> create(const Pad_Op<2> &op) {
+    static std::unique_ptr<PadImpl_cuda> create(const Pad_Op<DIM>& op) {
         return std::make_unique<PadImpl_cuda>(op);
     }
-public:
-    void forward();
-    void backward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
+    void backward() override;
 private:
+    // CuDNN specific variables
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
+    int mLeftPad, mTopPad;
+    double mPadVal;
+    unsigned int mPadType;
    template <class T> void forward_(const Tensor& input);
    template <class T> void backward_(const Tensor& outGrad);
 };
-namespace {
-// add cuda backend to Pad_Op<2> implementation registry
-static Registrar<Pad_Op<2>> registrarPadImpl_cuda("cuda", Aidge::PadImpl_cuda<2>::create);
-} // namespace
+// Implementation entry point registration to Operator
+using Pad2D_Op = Pad_Op<2>;
+REGISTRAR(Pad2D_Op, "cuda", Aidge::PadImpl_cuda<2>::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_PADIMPL_H_ */
@@ -27,30 +27,33 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 class PowImpl_cuda : public OperatorImpl {
-private:
 public:
-    PowImpl_cuda(const Pow_Op &op) : OperatorImpl(op, "cuda") {}
+    PowImpl_cuda(const Pow_Op& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<PowImpl_cuda> create(const Pow_Op &op) {
+    static std::unique_ptr<PowImpl_cuda> create(const Pow_Op& op) {
         return std::make_unique<PowImpl_cuda>(op);
     }
-public:
-    void forward();
-    void backward();
-    // ~PowImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
+    void backward() override;
 private:
     template <class T> void forward_(const std::vector<Tensor>& inputs, const std::vector<std::vector<int>>& inputsDims, const std::vector<std::vector<int>>& inputsStrides);
     template <class T> void backward_(const Tensor& outGrad);
 };
-namespace {
-// add cuda backend to Pow_Op implementation registry
-static Registrar<Pow_Op> registrarPowImpl_cuda("cuda", Aidge::PowImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Pow_Op, "cuda", Aidge::PowImpl_cuda::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_POWIMPL_H_ */
@@ -27,7 +27,25 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 class ReLUImpl_cuda : public OperatorImpl {
+public:
+    ReLUImpl_cuda(const ReLU_Op& op) : OperatorImpl(op, "cuda") {}
+    static std::unique_ptr<ReLUImpl_cuda> create(const ReLU_Op& op) {
+        return std::make_unique<ReLUImpl_cuda>(op);
+    }
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Any}
+        };
+    }
+    void forward() override;
+    void backward() override;
+    ~ReLUImpl_cuda();
 private:
     // CuDNN specific variables
 #if CUDNN_VERSION >= 5000
@@ -38,27 +56,12 @@ private:
     std::shared_ptr<Tensor> mInputFallback;
     std::shared_ptr<Tensor> mOutputGradFallback;
-public:
-    ReLUImpl_cuda(const ReLU_Op &op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<ReLUImpl_cuda> create(const ReLU_Op &op) {
-        return std::make_unique<ReLUImpl_cuda>(op);
-    }
-public:
-    void forward();
-    void backward();
-    ~ReLUImpl_cuda();
-private:
     template <class T> void forward_(const Tensor& input);
     template <class T> void backward_(const Tensor& output_grad);
 };
-namespace {
-// add cuda backend to ReLU_Op implementation registry
-static Registrar<ReLU_Op> registrarReLUImpl_cuda("cuda", Aidge::ReLUImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(ReLU_Op, "cuda", Aidge::ReLUImpl_cuda::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_RELUIMPL_H_ */
@@ -27,33 +27,36 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 class ReduceMeanImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
 public:
-    ReduceMeanImpl_cuda(const ReduceMean_Op &op) : OperatorImpl(op, "cuda") {}
+    ReduceMeanImpl_cuda(const ReduceMean_Op& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<ReduceMeanImpl_cuda> create(const ReduceMean_Op &op) {
+    static std::unique_ptr<ReduceMeanImpl_cuda> create(const ReduceMean_Op& op) {
         return std::make_unique<ReduceMeanImpl_cuda>(op);
     }
-public:
-    void forward();
-    void backward();
-    // ~ReduceMeanImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
+    void backward() override;
 private:
+    // CuDNN specific variables
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
     template <class T> void forward_(const Tensor& input, const std::vector<int>& axes, bool keepDims);
     template <class T> void backward_(const Tensor& output_grad, const std::vector<int>& axes);
 };
-namespace {
-// add cuda backend to ReduceMean_Op implementation registry
-static Registrar<ReduceMean_Op> registrarReduceMeanImpl_cuda("cuda", Aidge::ReduceMeanImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(ReduceMean_Op, "cuda", Aidge::ReduceMeanImpl_cuda::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_REDUCEMEANIMPL_H_ */
@@ -27,32 +27,36 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 class ReduceSumImpl_cuda : public OperatorImpl {
-private:
-    // CuDNN specific variables
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
 public:
-    ReduceSumImpl_cuda(const ReduceSum_Op &op) : OperatorImpl(op, "cuda") {}
+    ReduceSumImpl_cuda(const ReduceSum_Op& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<ReduceSumImpl_cuda> create(const ReduceSum_Op &op) {
+    static std::unique_ptr<ReduceSumImpl_cuda> create(const ReduceSum_Op& op) {
         return std::make_unique<ReduceSumImpl_cuda>(op);
     }
-public:
-    void forward();
-    void backward();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
+    void backward() override;
 private:
+    // CuDNN specific variables
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
     template <class T> void forward_(const Tensor& input, const std::vector<int>& axes, bool keepDims);
     template <class T> void backward_(const Tensor& output_grad, const std::vector<int>& axes);
 };
-namespace {
-// add cuda backend to ReduceSum_Op implementation registry
-static Registrar<ReduceSum_Op> registrarReduceSumImpl_cuda("cuda", Aidge::ReduceSumImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(ReduceSum_Op, "cuda", Aidge::ReduceSumImpl_cuda::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_REDUCESUMIMPL_H_ */
@@ -27,27 +27,32 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 class ReshapeImpl_cuda : public OperatorImpl {
-private:
-    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
 public:
-    ReshapeImpl_cuda(const Reshape_Op &op) : OperatorImpl(op, "cuda") {}
+    ReshapeImpl_cuda(const Reshape_Op& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<ReshapeImpl_cuda> create(const Reshape_Op &op) {
+    static std::unique_ptr<ReshapeImpl_cuda> create(const Reshape_Op& op) {
         return std::make_unique<ReshapeImpl_cuda>(op);
     }
-public:
-    void forward();
-    void backward();
-    ~ReshapeImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
+    void backward() override;
 private:
+    std::shared_ptr<Tensor> mInputFallback, mOutputGradFallback;
 };
-namespace {
-// add cuda backend to Reshape_Op implementation registry
-static Registrar<Reshape_Op> registrarReshapeImpl_cuda("cuda", Aidge::ReshapeImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Reshape_Op, "cuda", Aidge::ReshapeImpl_cuda::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_RESHAPEIMPL_H_ */
@@ -29,29 +29,33 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 class ShiftGELUImpl_cuda : public OperatorImpl {
-private:
-    std::shared_ptr<Tensor> mInputFallback;
 public:
-    ShiftGELUImpl_cuda(const ShiftGELU_Op &op) : OperatorImpl(op, "cuda") {}
+    ShiftGELUImpl_cuda(const ShiftGELU_Op& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<ShiftGELUImpl_cuda> create(const ShiftGELU_Op &op) {
+    static std::unique_ptr<ShiftGELUImpl_cuda> create(const ShiftGELU_Op& op) {
         return std::make_unique<ShiftGELUImpl_cuda>(op);
     }
-public:
-    void forward();
-    //~ShiftGELUImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
 private:
+    std::shared_ptr<Tensor> mInputFallback;
     template <class T> void forward_(const Tensor& input);
 };
-namespace {
-// add cuda backend to ShiftGELU_Op implementation registry
-static Registrar<ShiftGELU_Op> registrarShiftGELUImpl_cuda("cuda", Aidge::ShiftGELUImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(ShiftGELU_Op, "cuda", Aidge::ShiftGELUImpl_cuda::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_SHIFTGELUIMPL_H_ */
\ No newline at end of file
@@ -29,29 +29,33 @@
 #include "aidge/backend/cuda/utils/CudaUtils.hpp"
 namespace Aidge {
+// Operator implementation entry point for the backend
 class ShiftMaxImpl_cuda : public OperatorImpl {
-private:
-    std::shared_ptr<Tensor> mInputFallback;
 public:
-    ShiftMaxImpl_cuda(const ShiftMax_Op &op) : OperatorImpl(op, "cuda") {}
+    ShiftMaxImpl_cuda(const ShiftMax_Op& op) : OperatorImpl(op, "cuda") {}
-    static std::unique_ptr<ShiftMaxImpl_cuda> create(const ShiftMax_Op &op) {
+    static std::unique_ptr<ShiftMaxImpl_cuda> create(const ShiftMax_Op& op) {
        return std::make_unique<ShiftMaxImpl_cuda>(op);
     }
-public:
-    void forward();
-    //~ShiftMaxImpl_cuda();
+    virtual std::set<ImplSpec> getAvailableImplSpecs() const override {
+        return {
+            {DataType::Float64},
+            {DataType::Float32},
+            {DataType::Float16},
+        };
+    }
+    void forward() override;
 private:
+    std::shared_ptr<Tensor> mInputFallback;
     template <class T> void forward_(const Tensor& input);
 };
-namespace {
-// add cuda backend to ShiftMax_Op implementation registry
-static Registrar<ShiftMax_Op> registrarShiftMaxImpl_cuda("cuda", Aidge::ShiftMaxImpl_cuda::create);
-} // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(ShiftMax_Op, "cuda", Aidge::ShiftMaxImpl_cuda::create);
 } // namespace Aidge
 #endif /* AIDGE_BACKEND_CUDA_OPERATOR_SHIFTMAXIMPL_H_ */