diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 1c41206bf1e8f7a3830071f7d2041b19125db2e8..12dd5433ada26f8a88df0a35f22c3a7052940e7d 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -6,6 +6,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/future_std/span.hpp"
 
 namespace Aidge {
 template <class T>
@@ -13,7 +14,10 @@ class TensorImpl_cpu : public TensorImpl {
    private:
     const Tensor &mTensor;  // Impl needs to access Tensor information, but is not
                             // supposed to change it!
-    std::vector<T> mData;
+    /// Non-owning view over the data: pointer plus capacity
+    future_std::span<T> mData;
+    /// If this instance owns the data, the std::unique_ptr manages its lifetime
+    std::unique_ptr<T[]> mDataOwner;
 
    public:
     static constexpr const char *Backend = "cpu";
@@ -21,9 +25,12 @@ class TensorImpl_cpu : public TensorImpl {
     TensorImpl_cpu(const Tensor &tensor) : TensorImpl(Backend), mTensor(tensor) {}
 
     bool operator==(const TensorImpl &otherImpl) const override final {
+        const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
+        AIDGE_INTERNAL_ASSERT(typedOtherImpl.data().size() >= mTensor.size());
+
         std::size_t i = 0;
         for (; i < mTensor.size() &&
-               mData[i] == reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl).data()[i];
+               mData[i] == typedOtherImpl.data()[i];
              ++i) {
         }
         return i == mTensor.size();
@@ -34,7 +41,7 @@ class TensorImpl_cpu : public TensorImpl {
     }
 
     // native interface
-    const std::vector<T> &data() const { return mData; }
+    const future_std::span<T>& data() const { return mData; }
 
     std::size_t scalarSize() const override { return sizeof(T); }
 
@@ -110,41 +117,47 @@ class TensorImpl_cpu : public TensorImpl {
     }
 
     void *rawPtr() override {
-        lazyInit(mData);
+        lazyInit();
         return mData.data();
     };
 
     const void *rawPtr() const override {
-        AIDGE_ASSERT(mData.size() == mTensor.size(), "accessing uninitialized const rawPtr");
+        AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const rawPtr");
         return mData.data();
     };
 
     void *hostPtr() override {
-        lazyInit(mData);
+        lazyInit();
         return mData.data();
     };
 
     const void *hostPtr() const override {
-        AIDGE_ASSERT(mData.size() == mTensor.size(), "accessing uninitialized const hostPtr");
+        AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const hostPtr");
         return mData.data();
     };
 
-   void* getRaw(std::size_t idx){
-       return  static_cast<void*>(static_cast<T *>(rawPtr()) + idx);
-   };
+    void* getRaw(std::size_t idx) {
+        AIDGE_ASSERT(idx < mData.size(), "idx out of range");
+        return  static_cast<void*>(static_cast<T *>(rawPtr()) + idx);
+    };
 
     virtual ~TensorImpl_cpu() = default;
 
-    void setRawPtr(void *ptr) override final {
-        T *newPtr = static_cast<T *>(ptr);
-        mData = std::vector<T>(newPtr, newPtr + mTensor.size());
+    void setRawPtr(void *ptr, NbElts_t length) override final {
+        AIDGE_ASSERT(length >= mTensor.size(), "trying to set raw pointer of insufficient capacity");
+        mData = future_std::span<T>(static_cast<T *>(ptr), length);
     };
 
-   private:
-    void lazyInit(std::vector<T> &data) {
-        assert(mTensor.dataType() == NativeType<T>::type);
+private:
+    void lazyInit() {
+        AIDGE_INTERNAL_ASSERT(mTensor.dataType() == NativeType<T>::type);
 
-        if (data.size() != mTensor.size()) data.resize(mTensor.size());
+        if (mData.size() < mTensor.size()) {
+            // Need more data, a re-allocation will occur
+            AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");
+            mDataOwner.reset(new T[mTensor.size()]);
+            mData = future_std::span<T>(mDataOwner.get(), mTensor.size());
+        }
     }
 };