diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 8b2987b35e0b9eff52cf6aebedee537b6330e2b0..dd31088d136cc03278ecf23cae4dc15165017b29 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -51,6 +51,7 @@ class TensorImpl_cpu : public TensorImpl {
     }
 
     void copy(const void *src, NbElts_t length) override {
+        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length exceeds capacity");
         std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
                   static_cast<T *>(rawPtr()));
     }
@@ -60,6 +61,7 @@ class TensorImpl_cpu : public TensorImpl {
             return;
         }
 
+        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length exceeds capacity");
         if (srcDt == DataType::Float64) {
             std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
                     static_cast<T *>(rawPtr()));
@@ -120,6 +122,7 @@ class TensorImpl_cpu : public TensorImpl {
     }
 
     void copyToHost(void *dst, NbElts_t length) const override {
+        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length exceeds capacity");
         const T* src = static_cast<const T*>(rawPtr());
         std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
                   static_cast<T *>(dst));
diff --git a/unit_tests/scheduler/Test_Convert.cpp b/unit_tests/scheduler/Test_Convert.cpp
index 8a71ed35834143d9e73a68e6acfdae713d5bc8a2..79a5f50c8c6921badb0e7330d9c2c27c882a9fc9 100644
--- a/unit_tests/scheduler/Test_Convert.cpp
+++ b/unit_tests/scheduler/Test_Convert.cpp
@@ -140,6 +140,13 @@ TEST_CASE("[cpu/convert] Convert(forward)") {
         REQUIRE(approxEq<int>(*other4, expectedOutput4, 0.0, 1.0e-12));
     }
 
+    SECTION("Half") {
+        Tensor refTensor = Array2D<float, 3, 2>{{{0.0, 1.0},{2.1, 3.4},{5000.0, 1.0e4}}};
+        Tensor tensor(DataType::Float16);
+        tensor.copyCastFrom(refTensor);
+        REQUIRE(approxEq<float, half_float::half>(refTensor, tensor, 1.0e-3, 0.0));
+    }
+
     SECTION("Test explicit") {
         std::shared_ptr<GraphView> g =
                 Sequential({
@@ -188,7 +195,7 @@ TEST_CASE("[cpu/convert] Convert(forward)") {
         // input->addChild(g);
         g->setDataType(Aidge::DataType::Int32);
         g->getNode("conv1")->getOperator()->setDataType(DataType::Float32);
-        g->getNode("conv3")->getOperator()->setDataType(DataType::Float16);
+        g->getNode("conv3")->getOperator()->setDataType(DataType::Float64);
 
         explicitConvert(g);
         g->setBackend("cpu");
@@ -232,7 +239,7 @@ TEST_CASE("[cpu/convert] Convert(forward)") {
         std::shared_ptr<Tensor> other2 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv2")->getOperator())->getOutput(0);
         REQUIRE(approxEq<int>(*other2, *expectedOutput2, 0.0, 1.0e-12));
         std::shared_ptr<Tensor> other3 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv3")->getOperator())->getOutput(0);
-        REQUIRE(approxEq<half_float::half, int>(*other3, *expectedOutput3, 0.0, 1.0e-12));
+        REQUIRE(approxEq<double, int>(*other3, *expectedOutput3, 0.0, 1.0e-12));
         std::shared_ptr<Tensor> other4 = std::static_pointer_cast<OperatorTensor>(g->getNode("fc")->getOperator())->getOutput(0);
         REQUIRE(approxEq<int>(*other4, expectedOutput4, 0.0, 1.0e-12));
     }