diff --git a/include/aidge/backend/cpu/data/GetCPUPtr.h b/include/aidge/backend/cpu/data/GetCPUPtr.h
index 38ea848afc29fa4c23ff500f97e0c57954695021..47e3b07e8fa08cdcd714745a9a49bb03e30f79f5 100644
--- a/include/aidge/backend/cpu/data/GetCPUPtr.h
+++ b/include/aidge/backend/cpu/data/GetCPUPtr.h
@@ -16,7 +16,8 @@
 
 namespace Aidge {
 inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) {
-  return std::static_pointer_cast<Tensor>(data)->getImpl()->rawPtr();
+  const auto tensor = std::static_pointer_cast<Tensor>(data);
+  return tensor->getImpl()->hostPtr(tensor->getImplOffset());
 }
 } // namespace Aidge
 
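Note on the hunk above: the raw pointer is now resolved through hostPtr() with the tensor's implementation offset, instead of always returning the start of the underlying buffer, so views into a shared buffer get the correct address. A minimal, hedged usage sketch (the tensor setup is illustrative; getCPUPtr and setBackend come from the diff and the surrounding Aidge API):

    // Sketch: on a view, getCPUPtr returns the base pointer shifted by getImplOffset().
    auto t = std::make_shared<Aidge::Tensor>(Aidge::Array1D<int, 4>{{1, 2, 3, 4}});
    t->setBackend("cpu");
    int *p = static_cast<int *>(Aidge::getCPUPtr(t)); // host pointer at the view's first element
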
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index c451b4a5beccacb7980c834d56b979c1b76cdd3f..46dfae3d53b4b201507290bd538ea13737919c3e 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -25,8 +25,6 @@ namespace Aidge {
 template <class T>
 class TensorImpl_cpu : public TensorImpl {
 private:
-    const Tensor &mTensor;  // Impl needs to access Tensor information, but is not
-                            // supposed to change it!
     /// Pointer to the data and its capacity
     future_std::span<T> mData;
     /// If this instance own the data, std::unique_ptr manages it
@@ -35,88 +33,87 @@ private:
 public:
     static constexpr const char *Backend = "cpu";
 
-    TensorImpl_cpu(const Tensor &tensor) : TensorImpl(Backend), mTensor(tensor) {}
+    TensorImpl_cpu(DeviceIdx_t device, NbElts_t length) : TensorImpl(Backend, device, length) {}
 
     bool operator==(const TensorImpl &otherImpl) const override final {
         const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
-        AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mTensor.size());
+        AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts);
 
         std::size_t i = 0;
-        for (; i < mTensor.size() &&
+        for (; i < mNbElts &&
                *(mData.data()+i) == *static_cast<const T*>(typedOtherImpl.rawPtr(i));
                ++i) {
         }
-        return i == mTensor.size();
+        return i == mNbElts;
     }
 
-    static std::unique_ptr<TensorImpl_cpu> create(const Tensor &tensor) {
-        return std::make_unique<TensorImpl_cpu<T>>(tensor);
+    static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, NbElts_t length) {
+        return std::make_shared<TensorImpl_cpu<T>>(device, length);
     }
 
-    inline std::size_t size() const noexcept override final { return mData.size(); }
     inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
 
-    void setDevice(DeviceIdx_t device) override final {
-        AIDGE_ASSERT(device == 0, "device cannot be != 0 for CPU backend");
-    }
-
     void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
-        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
-        std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
-                  static_cast<T *>(rawPtr()) + offset);
+        const T* srcT = static_cast<const T *>(src);
+        T* dstT = static_cast<T *>(rawPtr(offset));
+
+        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
+        AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported");
+        std::copy(srcT, srcT + length, dstT);
     }
 
-    void copyCast(const void *src, NbElts_t length, const DataType srcDt) override final {
+    void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final {
         if (length == 0) {
             return;
         }
 
-        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
+        T* dstT = static_cast<T *>(rawPtr(offset));
+        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
         switch (srcDt)
         {
             case DataType::Float64:
                 std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::Float32:
                 std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::Float16:
                 std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::Int64:
                 std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::UInt64:
                 std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::Int32:
                 std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::UInt32:
                 std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::Int16:
                 std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::UInt16:
                 std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::Int8:
                 std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             case DataType::UInt8:
                 std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
-                        static_cast<T *>(rawPtr()));
+                        dstT);
                 break;
             default:
                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
@@ -124,21 +121,20 @@ public:
         }
     }
 
-    void copyFromDevice(const void *src, NbElts_t length, const std::pair<std::string, DeviceIdx_t>& device) override final {
+    void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
         AIDGE_ASSERT(device.first == Backend, "backend must match");
         AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
-        copy(src, length);
+        copy(src, length, offset);
     }
 
-    inline void copyFromHost(const void *src, NbElts_t length) override final {
-        copy(src, length);
+    inline void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
+        copy(src, length, offset);
     }
 
-    void copyToHost(void *dst, NbElts_t length) const override final {
-        AIDGE_ASSERT(length <= mData.size() || length <= mTensor.size(), "copy length is above capacity");
-        const T* src = static_cast<const T*>(rawPtr());
-        std::copy(static_cast<const T *>(src), static_cast<const T *>(src) + length,
-                  static_cast<T *>(dst));
+    void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
+        const T* src = static_cast<const T*>(rawPtr(offset));
+        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
+        std::copy(src, src + length, static_cast<T *>(dst));
     }
 
     void *rawPtr(NbElts_t offset = 0) override final {
@@ -147,7 +143,7 @@ public:
     };
 
     const void *rawPtr(NbElts_t offset = 0) const override final {
-        AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const rawPtr");
+        AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const rawPtr");
         return (mData.data() + offset);
     };
 
@@ -157,12 +153,12 @@ public:
     };
 
     const void *hostPtr(NbElts_t offset = 0) const override final {
-        AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const hostPtr");
+        AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const hostPtr");
         return (mData.data() + offset);
     };
 
     void setRawPtr(void *ptr, NbElts_t length) override final {
-        AIDGE_ASSERT(length >= mTensor.size(), "trying to set raw pointer of insufficient capacity");
+        AIDGE_ASSERT(length >= mNbElts, "trying to set raw pointer of insufficient capacity");
         mData = future_std::span<T>(static_cast<T *>(ptr), length);
         mDataOwner.reset();
     };
@@ -171,11 +167,11 @@ public:
 
 private:
     void lazyInit() {
-        if (mData.size() < mTensor.size()) {
+        if (mData.size() < mNbElts) {
             // Need more data, a re-allocation will occur
             AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");
-            mDataOwner.reset(new T[mTensor.size()]);
-            mData = future_std::span<T>(mDataOwner.get(), mTensor.size());
+            mDataOwner.reset(new T[mNbElts]);
+            mData = future_std::span<T>(mDataOwner.get(), mNbElts);
         }
     }
 };
diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp
index b75c49077f190ed61486fea8eaa18152423a73ed..cfcfb45e3735538c1650cfd990ea85e2333916ad 100644
--- a/unit_tests/data/Test_TensorImpl.cpp
+++ b/unit_tests/data/Test_TensorImpl.cpp
@@ -14,6 +14,7 @@
 #include <catch2/catch_test_macros.hpp>
 
 #include "aidge/data/Tensor.hpp"
+#include "aidge/utils/TensorUtils.hpp"
 #include "aidge/backend/cpu/data/TensorImpl.hpp"
 
 using namespace Aidge;
@@ -57,3 +58,43 @@ TEST_CASE("Tensor creation") {
     }
   }
 }
+
+TEST_CASE("Tensor methods") {
+  Tensor x = Array3D<int, 2, 2, 2>{{
+    {{1, 2},
+     {3, 4}},
+    {{5, 6},
+     {7, 8}}
+  }};
+
+  Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+
+  Tensor xFloat =
+      Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
+
+  SECTION("Tensor sharing") {
+    Tensor xCopyCtor(x);
+    REQUIRE(xCopyCtor.getImpl() == x.getImpl());
+
+    Tensor xEqOp = x;
+    REQUIRE(xEqOp.getImpl() == x.getImpl());
+
+    Tensor xCloned = x.clone();
+    REQUIRE(xCloned.getImpl() != x.getImpl());
+    REQUIRE(xCloned == x);
+  }
+
+  SECTION("Tensor extract") {
+    Tensor y = x.extract({0, 1});
+    REQUIRE(y.getImpl() == x.getImpl());
+    REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
+    REQUIRE(y.isContiguous());
+
+    Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1});
+    REQUIRE(y2.getImpl() == x.getImpl());
+    REQUIRE(!y2.isContiguous());
+    Tensor y3 = y2.clone();
+    REQUIRE(y3.isContiguous());
+    REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
+  }
+}
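
For completeness, the new extract tests line up with the offset-aware accessors above: extract() keeps the parent's implementation and only shifts where the view starts, while clone() is what produces an owning, contiguous copy. The same idea outside Catch2 (names as in the test):

    Aidge::Tensor view = x.extract({0, 1, 1}, {2, 1, 1}); // shares x's impl, non-contiguous
    Aidge::Tensor packed = view.clone();                  // contiguous, independent storage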