diff --git a/include/aidge/backend/cuda.hpp b/include/aidge/backend/cuda.hpp
index 5b9b94f03a2c5a099f010ab7117479030003f5a8..aa11c23aae3f47c9024a37c9c2efb450626e396c 100644
--- a/include/aidge/backend/cuda.hpp
+++ b/include/aidge/backend/cuda.hpp
@@ -18,5 +18,7 @@
 #include "aidge/backend/cuda/operator/FCImpl.hpp"
 #include "aidge/backend/cuda/operator/MaxPoolingImpl.hpp"
 #include "aidge/backend/cuda/operator/ReLUImpl.hpp"
+#include "aidge/backend/cuda/operator/SigmoidImpl.hpp"
+#include "aidge/backend/cuda/operator/TanhImpl.hpp"
 
 #endif /* AIDGE_BACKEND_CUDA_IMPORTS_H_ */
\ No newline at end of file
diff --git a/include/aidge/backend/cuda/operator/SigmoidImpl.hpp b/include/aidge/backend/cuda/operator/SigmoidImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..39d13539d94ca38842e805f2425ad0abcce5822c
--- /dev/null
+++ b/include/aidge/backend/cuda/operator/SigmoidImpl.hpp
@@ -0,0 +1,62 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_BACKEND_CUDA_OPERATOR_SIGMOIDIMPL_H_
+#define AIDGE_BACKEND_CUDA_OPERATOR_SIGMOIDIMPL_H_
+
+#include <array>
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include <cudnn.h>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Sigmoid.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+#include "aidge/backend/cuda/utils/CudaUtils.hpp"
+
+namespace Aidge {
+class SigmoidImpl_cuda : public OperatorImpl {
+private:
+    // cuDNN-specific variables
+    #if CUDNN_VERSION >= 5000
+        cudnnActivationDescriptor_t mSigmoidDesc = nullptr;
+    #else
+        cudnnActivationMode_t mSigmoidDesc = CUDNN_ACTIVATION_SIGMOID;
+    #endif
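+    // Fallback storage used by refCastFrom() when the input must be converted to the output's data type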
+    std::shared_ptr<Tensor> mInputFallback;
+
+public:
+    SigmoidImpl_cuda(const Sigmoid_Op &op) : OperatorImpl(op, "cuda") {}
+
+    static std::unique_ptr<SigmoidImpl_cuda> create(const Sigmoid_Op &op) {
+        return std::make_unique<SigmoidImpl_cuda>(op);
+    }
+
+public:
+    void forward();
+    ~SigmoidImpl_cuda();
+
+private:
+    template <class T> void forward_(const Tensor& input);
+};
+
+namespace {
+// add cuda backend to Sigmoid_Op implementation registry
+static Registrar<Sigmoid_Op> registrarSigmoidImpl_cuda("cuda", Aidge::SigmoidImpl_cuda::create);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_BACKEND_CUDA_OPERATOR_SIGMOIDIMPL_H_ */
diff --git a/include/aidge/backend/cuda/operator/TanhImpl.hpp b/include/aidge/backend/cuda/operator/TanhImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..385dd06e86bc9ec68a61c9308c6cb3ac8d8687f5
--- /dev/null
+++ b/include/aidge/backend/cuda/operator/TanhImpl.hpp
@@ -0,0 +1,62 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_BACKEND_CUDA_OPERATOR_TANHIMPL_H_
+#define AIDGE_BACKEND_CUDA_OPERATOR_TANHIMPL_H_
+
+#include <array>
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include <cudnn.h>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Tanh.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+#include "aidge/backend/cuda/utils/CudaUtils.hpp"
+
+namespace Aidge {
+class TanhImpl_cuda : public OperatorImpl {
+private:
+    // cuDNN-specific variables
+    #if CUDNN_VERSION >= 5000
+        cudnnActivationDescriptor_t mTanhDesc = nullptr;
+    #else
+        cudnnActivationMode_t mTanhDesc = CUDNN_ACTIVATION_TANH;
+    #endif
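+    // Fallback storage used by refCastFrom() when the input must be converted to the output's data type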
+    std::shared_ptr<Tensor> mInputFallback;
+
+public:
+    TanhImpl_cuda(const Tanh_Op &op) : OperatorImpl(op, "cuda") {}
+
+    static std::unique_ptr<TanhImpl_cuda> create(const Tanh_Op &op) {
+        return std::make_unique<TanhImpl_cuda>(op);
+    }
+
+public:
+    void forward();
+    ~TanhImpl_cuda();
+
+private:
+    template <class T> void forward_(const Tensor& input);
+};
+
+namespace {
+// add cuda backend to Tanh_Op implementation registry
+static Registrar<Tanh_Op> registrarTanhImpl_cuda("cuda", Aidge::TanhImpl_cuda::create);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_BACKEND_CUDA_OPERATOR_TANHIMPL_H_ */
diff --git a/src/operator/SigmoidImpl.cpp b/src/operator/SigmoidImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..30767cdf25034f3d5ae19241d87a90f665878d02
--- /dev/null
+++ b/src/operator/SigmoidImpl.cpp
@@ -0,0 +1,77 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <vector>
+
+#include "aidge/backend/cuda/data/TensorImpl.hpp"
+#include "aidge/backend/cuda/operator/SigmoidImpl.hpp"
+#include "aidge/backend/cuda/utils/CudaContext.hpp"
+#include "aidge/backend/cuda/utils/CudaUtils.hpp"
+#include "aidge/operator/Sigmoid.hpp"
+#include "aidge/utils/Types.h"
+
+void Aidge::SigmoidImpl_cuda::forward() {
+    const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
+
+    assert(mOp.getRawInput(0) && "missing input #0");
+
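+    // Cast/copy the input to the output's data type if needed; mInputFallback provides the storage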
+    const auto& input = op.getInput(0)->refCastFrom(mInputFallback, *op.getOutput(0));
+
+    // Lazy-initialize the cuDNN Sigmoid activation descriptor
+    #if CUDNN_VERSION >= 5000
+        if (mSigmoidDesc == nullptr) {
+            CHECK_CUDNN_STATUS(cudnnCreateActivationDescriptor(&mSigmoidDesc));
+            CHECK_CUDNN_STATUS(cudnnSetActivationDescriptor(
+                mSigmoidDesc, CUDNN_ACTIVATION_SIGMOID, CUDNN_NOT_PROPAGATE_NAN, 0.0));
+        }
+    #else
+        mSigmoidDesc = CUDNN_ACTIVATION_SIGMOID;
+    #endif
+
+    // Do the actual forward computation.
+    // The template parameter only selects the type of the scaling factors,
+    // which are float except when the computation runs in double precision.
+    if (op.getOutput(0)->dataType() == DataType::Float64) {
+        forward_<double>(input);
+    }
+    else {
+        forward_<float>(input);
+    }
+}
+
+template <class T>
+void Aidge::SigmoidImpl_cuda::forward_(const Tensor& input) {
+    const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
+    const typename Cuda::cudnn_scaling_type<T>::type alpha = 1.0f;
+    const typename Cuda::cudnn_scaling_type<T>::type beta = 0.0f;
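+    // cudnnActivationForward computes output = alpha * sigmoid(input) + beta * output,
+    // so alpha = 1 and beta = 0 yield the plain element-wise activation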
+    CHECK_CUDNN_STATUS(
+        cudnnActivationForward(CudaContext::cudnnHandle(),
+                               mSigmoidDesc,
+                               &alpha,
+                               std::dynamic_pointer_cast<TensorImpl_cuda_>(input.getImpl())->getCudnnTensorDesc(input),
+                               input.getImpl()->rawPtr(),
+                               &beta,
+                               std::dynamic_pointer_cast<TensorImpl_cuda_>(op.getOutput(0)->getImpl())->getCudnnTensorDesc(*op.getOutput(0)),
+                               std::static_pointer_cast<Tensor>(op.getRawOutput(0))->getImpl()->rawPtr()));
+}
+
+Aidge::SigmoidImpl_cuda::~SigmoidImpl_cuda() {
+    #if CUDNN_VERSION >= 5000
+        if (mSigmoidDesc != nullptr) {
+            cudnnDestroyActivationDescriptor(mSigmoidDesc);
+        }
+    #endif
+}
+
diff --git a/src/operator/TanhImpl.cpp b/src/operator/TanhImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..264a69a9772daf6a796c69469aa63410a3ecc8ae
--- /dev/null
+++ b/src/operator/TanhImpl.cpp
@@ -0,0 +1,77 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <vector>
+
+#include "aidge/backend/cuda/data/TensorImpl.hpp"
+#include "aidge/backend/cuda/operator/TanhImpl.hpp"
+#include "aidge/backend/cuda/utils/CudaContext.hpp"
+#include "aidge/backend/cuda/utils/CudaUtils.hpp"
+#include "aidge/operator/Tanh.hpp"
+#include "aidge/utils/Types.h"
+
+void Aidge::TanhImpl_cuda::forward() {
+    const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
+
+    assert(mOp.getRawInput(0) && "missing input #0");
+
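+    // Cast/copy the input to the output's data type if needed; mInputFallback provides the storage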
+    const auto& input = op.getInput(0)->refCastFrom(mInputFallback, *op.getOutput(0));
+
+    // Lazy-initialize the cuDNN Tanh activation descriptor
+    #if CUDNN_VERSION >= 5000
+        if (mTanhDesc == nullptr) {
+            CHECK_CUDNN_STATUS(cudnnCreateActivationDescriptor(&mTanhDesc));
+            CHECK_CUDNN_STATUS(cudnnSetActivationDescriptor(
+                mTanhDesc, CUDNN_ACTIVATION_TANH, CUDNN_NOT_PROPAGATE_NAN, 0.0));
+        }
+    #else
+        mTanhDesc = CUDNN_ACTIVATION_TANH;
+    #endif
+
+    // Do the actual forward computation.
+    // The template parameter only selects the type of the scaling factors,
+    // which are float except when the computation runs in double precision.
+    if (op.getOutput(0)->dataType() == DataType::Float64) {
+        forward_<double>(input);
+    }
+    else {
+        forward_<float>(input);
+    }
+}
+
+template <class T>
+void Aidge::TanhImpl_cuda::forward_(const Tensor& input) {
+    const OperatorTensor& op = static_cast<const OperatorTensor&>(mOp);
+    const typename Cuda::cudnn_scaling_type<T>::type alpha = 1.0f;
+    const typename Cuda::cudnn_scaling_type<T>::type beta = 0.0f;
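+    // cudnnActivationForward computes output = alpha * tanh(input) + beta * output,
+    // so alpha = 1 and beta = 0 yield the plain element-wise activation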
+    CHECK_CUDNN_STATUS(
+        cudnnActivationForward(CudaContext::cudnnHandle(),
+                               mTanhDesc,
+                               &alpha,
+                               std::dynamic_pointer_cast<TensorImpl_cuda_>(input.getImpl())->getCudnnTensorDesc(input),
+                               input.getImpl()->rawPtr(),
+                               &beta,
+                               std::dynamic_pointer_cast<TensorImpl_cuda_>(op.getOutput(0)->getImpl())->getCudnnTensorDesc(*op.getOutput(0)),
+                               std::static_pointer_cast<Tensor>(op.getRawOutput(0))->getImpl()->rawPtr()));
+}
+
+Aidge::TanhImpl_cuda::~TanhImpl_cuda() {
+    #if CUDNN_VERSION >= 5000
+        if (mTanhDesc != nullptr) {
+            cudnnDestroyActivationDescriptor(mTanhDesc);
+        }
+    #endif
+}
+