diff --git a/include/aidge/backend/cuda/data/TensorImpl.hpp b/include/aidge/backend/cuda/data/TensorImpl.hpp
index 6b309c5e30585a71330de73568010ac72754aed7..45f8a6c3bd8ac9e40f8f7716119e7a36ecfc1b3c 100644
--- a/include/aidge/backend/cuda/data/TensorImpl.hpp
+++ b/include/aidge/backend/cuda/data/TensorImpl.hpp
@@ -85,6 +85,8 @@ public:
             return;
         }
 
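+        // Guard against out-of-bounds copies: the requested length must fit within
+        // the allocated buffer (mData) or the tensor's declared element count.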
+        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
         if (srcDt == DataType::Float64) {
             thrust_copy(static_cast<const double*>(src),
                         static_cast<T*>(rawPtr()),
@@ -141,14 +142,18 @@
     }
 
     void copyFromDevice(const void *src, NbElts_t length, const std::pair<std::string, int>& device) override {
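+        // Same capacity guard as in copy(): reject copies longer than the buffer or the tensor size.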
+        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
         CHECK_CUDA_STATUS(cudaMemcpy(rawPtr(), src, length * sizeof(T), cudaMemcpyDeviceToDevice));
     }
 
     void copyFromHost(const void *src, NbElts_t length) override {
+        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
         CHECK_CUDA_STATUS(cudaMemcpy(rawPtr(), src, length * sizeof(T), cudaMemcpyHostToDevice));
     }
 
     void copyToHost(void *dst, NbElts_t length) const override {
+        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
         CHECK_CUDA_STATUS(cudaMemcpy(dst, rawPtr(), length * sizeof(T), cudaMemcpyDeviceToHost));
     }