Commit 0efec0ee authored by Houssem ROUIS

update tensorDesc call

parent ca5d5c3c
3 merge requests: !15 version 0.2.0, !12 Lenetop, !10 Lenet operators
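In short: getCudnnTensorDesc() previously took no arguments and was reached through a raw dynamic_cast on the implementation pointer; it now receives the tensor itself and is reached through std::dynamic_pointer_cast, which keeps the shared_ptr alive for the duration of the call. Extracted from the hunks below, the pattern is:

    // Before: raw pointer cast, no-argument getter.
    dynamic_cast<TensorImpl_cuda_*>(input.getImpl().get())->getCudnnTensorDesc()

    // After: shared_ptr cast; the getter receives the Tensor, presumably so the
    // descriptor can be built from the tensor's actual dimensions and data type.
    std::dynamic_pointer_cast<TensorImpl_cuda_>(input.getImpl())->getCudnnTensorDesc(input)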
@@ -24,14 +24,16 @@
 template <Aidge::DimIdx_t DIM>
 void Aidge::AvgPoolingImpl_cuda<DIM>::forward() {
+    const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
     assert(mOp.getRawInput(0) && "missing input #0");

     std::shared_ptr<Tensor> inputFallback;
-    const auto& input = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->refCastFrom(inputFallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
+    const auto& input = std::static_pointer_cast<Tensor>(op.getRawInput(0))->refCastFrom(inputFallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));

     // Lazy-initialize CuDNN AvgPooling descriptor
     if (mAvgPoolingDesc == nullptr) {
-        const AvgPooling_Op<DIM>& avgPoolingOp = static_cast<const AvgPooling_Op<DIM>&>(mOp);
+        const AvgPooling_Op<DIM>& avgPoolingOp = static_cast<const AvgPooling_Op<DIM>&>(op);
         const std::vector<int> strides(avgPoolingOp.template getAttr<AvgPoolingAttr::StrideDims>().begin(), avgPoolingOp.template getAttr<AvgPoolingAttr::StrideDims>().end());
         const std::vector<int> paddings(DIM, 0);
         const std::vector<int> window_dims(avgPoolingOp.template getAttr<AvgPoolingAttr::KernelDims>().begin(), avgPoolingOp.template getAttr<AvgPoolingAttr::KernelDims>().end());
@@ -58,6 +60,7 @@ void Aidge::AvgPoolingImpl_cuda<DIM>::forward() {
 template <Aidge::DimIdx_t DIM>
 template <class T>
 void Aidge::AvgPoolingImpl_cuda<DIM>::forward_(const Tensor& input) {
+    const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
     const T alpha = 1.0f;
     const T beta = 0.0f;
     CHECK_CUDNN_STATUS(
@@ -65,11 +68,11 @@ void Aidge::AvgPoolingImpl_cuda<DIM>::forward_(const Tensor& input) {
             CudaContext::cudnnHandle(),
             mAvgPoolingDesc,
             &alpha,
-            dynamic_cast<TensorImpl_cuda_*>(input.getImpl().get())->getCudnnTensorDesc(),
+            std::dynamic_pointer_cast<TensorImpl_cuda_>(input.getImpl())->getCudnnTensorDesc(input),
             input.getImpl()->rawPtr(),
             &beta,
-            dynamic_cast<TensorImpl_cuda_*>(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl().get())->getCudnnTensorDesc(),
-            std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()
+            std::dynamic_pointer_cast<TensorImpl_cuda_>(op.getOutput(0)->getImpl())->getCudnnTensorDesc(*op.getOutput(0)),
+            std::static_pointer_cast<Tensor>(op.getRawOutput(0))->getImpl()->rawPtr()
         )
     );
 }
...
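Aside from the descriptor call, these hunks also introduce a const OperatorTensor& op alias at the top of each forward() / forward_() body. mOp is held as a generic Operator, so output access previously went through getRawOutput() plus a manual downcast; OperatorTensor exposes typed accessors instead. A minimal illustration, assuming (as the diff suggests) that OperatorTensor::getOutput() returns a std::shared_ptr<Tensor>:

    const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);

    // Typed accessor: already a std::shared_ptr<Tensor>.
    std::shared_ptr<Tensor> out = op.getOutput(0);

    // Equivalent generic-Operator form used before this commit.
    std::shared_ptr<Tensor> outBefore = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0));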
@@ -24,14 +24,16 @@
 template <Aidge::DimIdx_t DIM>
 void Aidge::MaxPoolingImpl_cuda<DIM>::forward() {
+    const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
     assert(mOp.getRawInput(0) && "missing input #0");

     std::shared_ptr<Tensor> inputFallback;
-    const auto& input = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->refCastFrom(inputFallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
+    const auto& input = std::static_pointer_cast<Tensor>(op.getRawInput(0))->refCastFrom(inputFallback, *std::static_pointer_cast<Tensor>(op.getRawOutput(0)));

     // Lazy-initialize CuDNN MaxPooling descriptor
     if (mMaxPoolingDesc == nullptr) {
-        const MaxPooling_Op<DIM>& maxPoolingOp = static_cast<const MaxPooling_Op<DIM>&>(mOp);
+        const MaxPooling_Op<DIM>& maxPoolingOp = static_cast<const MaxPooling_Op<DIM>&>(op);
         const std::vector<int> strides(maxPoolingOp.template getAttr<MaxPoolingAttr::StrideDims>().begin(), maxPoolingOp.template getAttr<MaxPoolingAttr::StrideDims>().end());
         const std::vector<int> paddings(DIM, 0);
         const std::vector<int> window_dims(maxPoolingOp.template getAttr<MaxPoolingAttr::KernelDims>().begin(), maxPoolingOp.template getAttr<MaxPoolingAttr::KernelDims>().end());
@@ -58,6 +60,7 @@ void Aidge::MaxPoolingImpl_cuda<DIM>::forward() {
 template <Aidge::DimIdx_t DIM>
 template <class T>
 void Aidge::MaxPoolingImpl_cuda<DIM>::forward_(const Tensor& input) {
+    const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
     const T alpha = 1.0f;
     const T beta = 0.0f;
     CHECK_CUDNN_STATUS(
@@ -65,11 +68,11 @@ void Aidge::MaxPoolingImpl_cuda<DIM>::forward_(const Tensor& input) {
             CudaContext::cudnnHandle(),
             mMaxPoolingDesc,
             &alpha,
-            dynamic_cast<TensorImpl_cuda_*>(input.getImpl().get())->getCudnnTensorDesc(),
+            std::dynamic_pointer_cast<TensorImpl_cuda_>(input.getImpl())->getCudnnTensorDesc(input),
             input.getImpl()->rawPtr(),
             &beta,
-            dynamic_cast<TensorImpl_cuda_*>(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl().get())->getCudnnTensorDesc(),
-            std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()
+            std::dynamic_pointer_cast<TensorImpl_cuda_>(op.getOutput(0)->getImpl())->getCudnnTensorDesc(*op.getOutput(0)),
+            std::static_pointer_cast<Tensor>(op.getRawOutput(0))->getImpl()->rawPtr()
         )
     );
 }
...
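The body of getCudnnTensorDesc(const Tensor&) is not part of this diff. As a rough sketch of why the tensor argument is useful: a cuDNN Nd tensor descriptor encodes the tensor's data type, dimensions, and strides, so the getter needs the tensor at hand to build the descriptor lazily. The helper below is hypothetical, illustrative code, not Aidge's implementation; it assumes a contiguous tensor and uses only standard cuDNN calls:

    #include <cudnn.h>
    #include <vector>

    // Hypothetical helper: build a cuDNN Nd descriptor for a contiguous tensor.
    cudnnTensorDescriptor_t makeCudnnTensorDesc(std::vector<int> dims,
                                                cudnnDataType_t dataType) {
        // cuDNN Nd descriptors expect at least 4 dimensions; pad with leading 1s.
        while (dims.size() < 4)
            dims.insert(dims.begin(), 1);

        // Packed row-major strides: stride[i] = product of the dims after i.
        std::vector<int> strides(dims.size(), 1);
        for (int i = static_cast<int>(dims.size()) - 2; i >= 0; --i)
            strides[i] = strides[i + 1] * dims[i + 1];

        cudnnTensorDescriptor_t desc;
        cudnnCreateTensorDescriptor(&desc);
        cudnnSetTensorNdDescriptor(desc, dataType, static_cast<int>(dims.size()),
                                   dims.data(), strides.data());
        return desc;
    }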
@@ -23,10 +23,12 @@
 #include "aidge/backend/cuda/utils/CudaContext.hpp"

 void Aidge::ReLUImpl_cuda::forward() {
+    const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
     assert(mOp.getRawInput(0) && "missing input #0");

     std::shared_ptr<Tensor> inputFallback;
-    const auto& input = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->refCastFrom(inputFallback, *std::static_pointer_cast<Tensor>(mOp.getRawOutput(0)));
+    const auto& input = std::static_pointer_cast<Tensor>(op.getRawInput(0))->refCastFrom(inputFallback, *std::static_pointer_cast<Tensor>(op.getRawOutput(0)));

     // Lazy-initialize CuDNN ReLU descriptor
     if (mReLUDesc == nullptr) {
@@ -49,17 +51,18 @@ void Aidge::ReLUImpl_cuda::forward() {
 template <class T>
 void Aidge::ReLUImpl_cuda::forward_(const Tensor& input) {
+    const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
     const T alpha = 1.0f;
     const T beta = 0.0f;
     CHECK_CUDNN_STATUS(
         cudnnActivationForward(CudaContext::cudnnHandle(),
             mReLUDesc,
             &alpha,
-            dynamic_cast<TensorImpl_cuda_*>(input.getImpl().get())->getCudnnTensorDesc(),
+            std::dynamic_pointer_cast<TensorImpl_cuda_>(input.getImpl())->getCudnnTensorDesc(input),
             input.getImpl()->rawPtr(),
             &beta,
-            dynamic_cast<TensorImpl_cuda_*>(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl().get())->getCudnnTensorDesc(),
-            std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()));
+            std::dynamic_pointer_cast<TensorImpl_cuda_>(op.getOutput(0)->getImpl())->getCudnnTensorDesc(*op.getOutput(0)),
+            std::static_pointer_cast<Tensor>(op.getRawOutput(0))->getImpl()->rawPtr()));
 }

 Aidge::ReLUImpl_cuda::~ReLUImpl_cuda() {
...