Commit c7a8ee4a authored by Olivier BICHLER

Merge branch 'Added-Sigmoid-and-Tanh' into 'dev'

Added sigmoid and tanh

See merge request !22
parents b7cadcef d1d98fc0
2 merge requests: !32 version 0.2.1, !22 Added sigmoid and tanh
Pipeline #46985 passed
@@ -18,5 +18,7 @@
 #include "aidge/backend/cuda/operator/FCImpl.hpp"
 #include "aidge/backend/cuda/operator/MaxPoolingImpl.hpp"
 #include "aidge/backend/cuda/operator/ReLUImpl.hpp"
+#include "aidge/backend/cuda/operator/SigmoidImpl.hpp"
+#include "aidge/backend/cuda/operator/TanhImpl.hpp"
 #endif /* AIDGE_BACKEND_CUDA_IMPORTS_H_ */
\ No newline at end of file
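With the two headers added to the backend-wide import list, the CUDA implementations register themselves during static initialization and become selectable by backend name. A minimal usage sketch, assuming the usual aidge_core node API (the umbrella header name and exact call sequence are assumptions, not part of this commit):

#include <memory>
#include "aidge/backend/cuda.hpp"            // assumed to pull in the imports header patched above
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Sigmoid.hpp"

int main() {
    // A standalone Sigmoid node; "cuda" now resolves to SigmoidImpl_cuda
    // through the registrar declared in SigmoidImpl.hpp below.
    auto node = Aidge::Sigmoid("sig");
    auto op = std::static_pointer_cast<Aidge::OperatorTensor>(node->getOperator());
    op->setDataType(Aidge::DataType::Float32);
    op->setBackend("cuda");
    // ... associate an input Tensor, then op->forward() dispatches to the CUDA kernel.
}

The two new headers follow.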
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_BACKEND_CUDA_OPERATOR_SIGMOIDIMPL_H_
#define AIDGE_BACKEND_CUDA_OPERATOR_SIGMOIDIMPL_H_
#include <array>
#include <memory>
#include <tuple>
#include <vector>
#include <cudnn.h>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Sigmoid.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cuda/utils/CudaUtils.hpp"
namespace Aidge {
class SigmoidImpl_cuda : public OperatorImpl {
private:
// CuDNN specific variables
#if CUDNN_VERSION >= 5000
cudnnActivationDescriptor_t mSigmoidDesc = nullptr;
#else
cudnnActivationMode_t mSigmoidDesc = CUDNN_ACTIVATION_SIGMOID; // pre-5.0 cuDNN takes the mode enum directly
#endif
std::shared_ptr<Tensor> mInputFallback;
public:
SigmoidImpl_cuda(const Sigmoid_Op &op) : OperatorImpl(op, "cuda") {}
static std::unique_ptr<SigmoidImpl_cuda> create(const Sigmoid_Op &op) {
return std::make_unique<SigmoidImpl_cuda>(op);
}
public:
void forward();
~SigmoidImpl_cuda();
private:
template <class T> void forward_(const Tensor& input);
};
namespace {
// add cuda backend to Sigmoid_Op implementation registry
static Registrar<Sigmoid_Op> registrarSigmoidImpl_cuda("cuda", Aidge::SigmoidImpl_cuda::create);
} // namespace
} // namespace Aidge
#endif /* AIDGE_BACKEND_CUDA_OPERATOR_SIGMOIDIMPL_H_ */
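The anonymous-namespace Registrar object above is what binds the "cuda" key to the create() factory before main() runs. A self-contained sketch of the pattern (a simplified stand-in; the real Aidge::Registrar is templated more generally):

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Impl { virtual ~Impl() = default; virtual void forward() = 0; };

// One registry per operator type, keyed by backend name ("cpu", "cuda", ...).
template <class Op>
struct MiniRegistry {
    using Factory = std::function<std::unique_ptr<Impl>(const Op&)>;
    static std::map<std::string, Factory>& map() {
        static std::map<std::string, Factory> m;  // constructed on first use
        return m;
    }
    MiniRegistry(const std::string& backend, Factory f) { map()[backend] = std::move(f); }
};

struct SigmoidOp {};
struct CudaSigmoidImpl : Impl {
    explicit CudaSigmoidImpl(const SigmoidOp&) {}
    void forward() override { std::cout << "sigmoid forward on cuda\n"; }
};

// Static registrar object: runs before main(), like registrarSigmoidImpl_cuda above.
static MiniRegistry<SigmoidOp> registrarSigmoid("cuda",
    [](const SigmoidOp& op) { return std::make_unique<CudaSigmoidImpl>(op); });

int main() {
    SigmoidOp op;
    auto impl = MiniRegistry<SigmoidOp>::map().at("cuda")(op);  // lookup by backend name
    impl->forward();
}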
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_BACKEND_CUDA_OPERATOR_TANHIMPL_H_
#define AIDGE_BACKEND_CUDA_OPERATOR_TANHIMPL_H_
#include <array>
#include <memory>
#include <tuple>
#include <vector>
#include <cudnn.h>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Tanh.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cuda/utils/CudaUtils.hpp"
namespace Aidge {
class TanhImpl_cuda : public OperatorImpl {
private:
// CuDNN specific variables
#if CUDNN_VERSION >= 5000
cudnnActivationDescriptor_t mTanhDesc = nullptr;
#else
cudnnActivationMode_t mTanhDesc = CUDNN_ACTIVATION_TANH; // pre-5.0 cuDNN takes the mode enum directly
#endif
std::shared_ptr<Tensor> mInputFallback;
public:
TanhImpl_cuda(const Tanh_Op &op) : OperatorImpl(op, "cuda") {}
static std::unique_ptr<TanhImpl_cuda> create(const Tanh_Op &op) {
return std::make_unique<TanhImpl_cuda>(op);
}
public:
void forward();
~TanhImpl_cuda();
private:
template <class T> void forward_(const Tensor& input);
};
namespace {
// add cuda backend to Tanh_Op implementation registry
static Registrar<Tanh_Op> registrarTanhImpl_cuda("cuda", Aidge::TanhImpl_cuda::create);
} // namespace
} // namespace Aidge
#endif /* AIDGE_BACKEND_CUDA_OPERATOR_TANHIMPL_H_ */
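Both implementations use Cuda::cudnn_scaling_type<T> for the alpha/beta blending factors. cuDNN expects these host-side scaling parameters as float for half- and single-precision tensor data and as double only for double-precision data. A plausible definition of the trait (the real one lives in CudaUtils.hpp, so this sketch is an assumption):

namespace Cuda {
// Scaling parameters default to float...
template <class T>
struct cudnn_scaling_type { using type = float; };

// ...and switch to double only when the tensor data itself is double.
template <>
struct cudnn_scaling_type<double> { using type = double; };
} // namespace Cuda

// Used as in forward_():
//   const typename Cuda::cudnn_scaling_type<T>::type alpha = 1.0f;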
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <vector>
#include "aidge/backend/cuda/data/TensorImpl.hpp"
#include "aidge/backend/cuda/operator/SigmoidImpl.hpp"
#include "aidge/backend/cuda/utils/CudaContext.hpp"
#include "aidge/backend/cuda/utils/CudaUtils.hpp"
#include "aidge/operator/Sigmoid.hpp"
#include "aidge/utils/Types.h"
void Aidge::SigmoidImpl_cuda::forward() {
const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
assert(mOp.getRawInput(0) && "missing input #0");
const auto& input = op.getInput(0)->refCastFrom(mInputFallback, *op.getOutput(0));
// Lazy-initialize CuDNN Sigmoid descriptor
#if CUDNN_VERSION >= 5000
    // The nullptr check only makes sense for the opaque cuDNN >= 5.0
    // descriptor type, so guard the whole lazy-init block.
    if (mSigmoidDesc == nullptr) {
        CHECK_CUDNN_STATUS(cudnnCreateActivationDescriptor(&mSigmoidDesc));
        CHECK_CUDNN_STATUS(cudnnSetActivationDescriptor(
            mSigmoidDesc, CUDNN_ACTIVATION_SIGMOID, CUDNN_NOT_PROPAGATE_NAN, 0.0));
    }
#else
    mSigmoidDesc = CUDNN_ACTIVATION_SIGMOID;
#endif
// Do the actual forward computation
// Template is only for the scaling parameters, which are always float,
// except when the operation is performed in double precision.
if (op.getOutput(0)->dataType() == DataType::Float64) {
forward_<double>(input);
}
else {
forward_<float>(input);
}
}
template <class T>
void Aidge::SigmoidImpl_cuda::forward_(const Tensor& input) {
const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
const typename Cuda::cudnn_scaling_type<T>::type alpha = 1.0f;
const typename Cuda::cudnn_scaling_type<T>::type beta = 0.0f;
CHECK_CUDNN_STATUS(
cudnnActivationForward(CudaContext::cudnnHandle(),
mSigmoidDesc,
&alpha,
std::dynamic_pointer_cast<TensorImpl_cuda_>(input.getImpl())->getCudnnTensorDesc(input),
input.getImpl()->rawPtr(),
&beta,
std::dynamic_pointer_cast<TensorImpl_cuda_>(op.getOutput(0)->getImpl())->getCudnnTensorDesc(*op.getOutput(0)),
std::static_pointer_cast<Tensor>(op.getRawOutput(0))->getImpl()->rawPtr()));
}
Aidge::SigmoidImpl_cuda::~SigmoidImpl_cuda() {
#if CUDNN_VERSION >= 5000
    if (mSigmoidDesc != nullptr) {
        cudnnDestroyActivationDescriptor(mSigmoidDesc);
    }
#endif
}
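Stripped of the Aidge plumbing, the cuDNN call sequence above reduces to the following standalone sketch (illustrative only: a 1x1x1x4 float tensor, in-place activation, and minimal error handling):

#include <cstdio>
#include <cuda_runtime.h>
#include <cudnn.h>

int main() {
    float host[4] = {-2.f, -1.f, 0.f, 1.f};
    float* dev = nullptr;
    cudaMalloc(&dev, sizeof(host));
    cudaMemcpy(dev, host, sizeof(host), cudaMemcpyHostToDevice);

    cudnnHandle_t handle;
    cudnnCreate(&handle);

    // Describe the data as an NCHW tensor of shape 1x1x1x4.
    cudnnTensorDescriptor_t desc;
    cudnnCreateTensorDescriptor(&desc);
    cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1, 1, 4);

    // Same descriptor setup as the lazy-init block in forward().
    cudnnActivationDescriptor_t act;
    cudnnCreateActivationDescriptor(&act);
    cudnnSetActivationDescriptor(act, CUDNN_ACTIVATION_SIGMOID, CUDNN_NOT_PROPAGATE_NAN, 0.0);

    // out = alpha * sigmoid(in) + beta * out; cuDNN allows x and y to alias.
    const float alpha = 1.0f, beta = 0.0f;
    cudnnActivationForward(handle, act, &alpha, desc, dev, &beta, desc, dev);

    cudaMemcpy(host, dev, sizeof(host), cudaMemcpyDeviceToHost);
    for (float v : host) std::printf("%f\n", v);  // ~0.119, 0.269, 0.500, 0.731

    cudnnDestroyActivationDescriptor(act);
    cudnnDestroyTensorDescriptor(desc);
    cudnnDestroy(handle);
    cudaFree(dev);
}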
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <vector>
#include "aidge/backend/cuda/data/TensorImpl.hpp"
#include "aidge/backend/cuda/operator/TanhImpl.hpp"
#include "aidge/backend/cuda/utils/CudaContext.hpp"
#include "aidge/backend/cuda/utils/CudaUtils.hpp"
#include "aidge/operator/Tanh.hpp"
#include "aidge/utils/Types.h"
void Aidge::TanhImpl_cuda::forward() {
const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
assert(mOp.getRawInput(0) && "missing input #0");
const auto& input = op.getInput(0)->refCastFrom(mInputFallback, *op.getOutput(0));
// Lazy-initialize CuDNN Tanh descriptor
#if CUDNN_VERSION >= 5000
    // The nullptr check only makes sense for the opaque cuDNN >= 5.0
    // descriptor type, so guard the whole lazy-init block.
    if (mTanhDesc == nullptr) {
        CHECK_CUDNN_STATUS(cudnnCreateActivationDescriptor(&mTanhDesc));
        CHECK_CUDNN_STATUS(cudnnSetActivationDescriptor(
            mTanhDesc, CUDNN_ACTIVATION_TANH, CUDNN_NOT_PROPAGATE_NAN, 0.0));
    }
#else
    mTanhDesc = CUDNN_ACTIVATION_TANH;
#endif
// Do the actual forward computation
// Template is only for the scaling parameters, which are always float,
// except when the operation is performed in double precision.
if (op.getOutput(0)->dataType() == DataType::Float64) {
forward_<double>(input);
}
else {
forward_<float>(input);
}
}
template <class T>
void Aidge::TanhImpl_cuda::forward_(const Tensor& input) {
const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
const typename Cuda::cudnn_scaling_type<T>::type alpha = 1.0f;
const typename Cuda::cudnn_scaling_type<T>::type beta = 0.0f;
CHECK_CUDNN_STATUS(
cudnnActivationForward(CudaContext::cudnnHandle(),
mTanhDesc,
&alpha,
std::dynamic_pointer_cast<TensorImpl_cuda_>(input.getImpl())->getCudnnTensorDesc(input),
input.getImpl()->rawPtr(),
&beta,
std::dynamic_pointer_cast<TensorImpl_cuda_>(op.getOutput(0)->getImpl())->getCudnnTensorDesc(*op.getOutput(0)),
std::static_pointer_cast<Tensor>(op.getRawOutput(0))->getImpl()->rawPtr()));
}
Aidge::TanhImpl_cuda::~TanhImpl_cuda() {
#if CUDNN_VERSION >= 5000
    if (mTanhDesc != nullptr) {
        cudnnDestroyActivationDescriptor(mTanhDesc);
    }
#endif
}
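For reference, the two activations computed by these kernels, and the identity relating them:

\sigma(x) = \frac{1}{1 + e^{-x}}, \qquad
\tanh(x) = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}} = 2\,\sigma(2x) - 1

so the Tanh implementation differs from the Sigmoid one only in the activation mode passed to cuDNN.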