diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index d03ff86ae0a927ec0bf7054143030c490c4b0a80..600f0badc849046e69c02a5e4e003788ad0ff473 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -32,6 +32,7 @@ public:
     /// @param i_FirstDataCoordinates Logical coordinates of the data at null natural
     /// coordinates
     /// @param i_Dimensions Tensor dimensions
+    /// @note The current API does not require storage to be allocated at this point.
     /// @sa Coord_t
     TensorImpl(
         const char *backend,
@@ -53,10 +54,9 @@ public:
     /// @details Copy all characteristics of calling TensorImpl and its data (deep copy).
     /// @return Pointer to a copy of the TensorImpl object
     virtual detail::pimpl::ImplPtr_t Clone() const = 0;
-    /// @deprecated see API V2
-    /// @param src pointer to the raw buffer from which the data will be copied
+    /// @param src pointer to the raw host buffer from which the data will be copied
     /// @param length Nb of element to copy from the buffer
-    virtual void copy(Byte_t const *const src, NbElts_t length) = 0;
+    virtual void copyFromHost(Byte_t const *const src, NbElts_t length) = 0;
     /// @deprecated see API V2
     /// @todo This function returns the address of a data allocated on the spot if it
     /// does not exists, which is undocumented. Is it a valid design. reconsider.
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 9ed78b43c621f84566b3e053b6c5e70cde0a8899..345586b5a72ffbc9e5895d49da5eaa8adca4e4c2 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -97,7 +97,7 @@ private:
     ///@brief Copy data from a raw buffer inside the Tensor data implementation
     /// @param srcPtr pointer to the raw buffer from which the data will be copied
     /// @param Length Nb of element to copy from the buffer
-    void copyData(Byte_t const *const srcPtr, NbElts_t const Length);
+    void copyFromHost(Byte_t const *const srcPtr, NbElts_t const Length);
     /// @brief enum to specify data type.
     DataType mDataType = DataType::Undefined;
     /// @brief Logical coordinates of the first data (lexicographic order) of the tensor
@@ -148,15 +148,19 @@ public:
      * @param i_FirstDataLogicalCoordinates Logical coordinates of the first data of the
      * area (the point whose all coordinates are minimal).
      * @param i_Dimensions Dimensions of the area.
-     * @details So far, the new Tensor is sharing its data with the source one: modifying
-     * the new tensor will modify also data in the source tensor as they share the same
-     * storage.
+     * @param i_Sharing indicates if the data is shared with the source Tensor
+     * (default: true).
+     * @details If i_Sharing is true, the new Tensor is sharing its data with the source
+     * one: modifying the new tensor will modify also data in the source tensor as they
+     * share the same storage.<br>
+     * Otherwise, the Tensor creates its own storage, with the same backend as source.
      * @warning The behaviour is undefined if source Tensor has no implementation
      */
     Tensor(
         const Tensor &otherTensor,
         std::vector<Coord_t> const &i_FirstDataLogicalCoordinates,
-        std::vector<DimSize_t> const &i_Dimensions);
+        std::vector<DimSize_t> const &i_Dimensions,
+        bool const i_Sharing = true);
 
     /**
      * @brief Construct a new Tensor object from the 1-dimension Array helper.
@@ -180,7 +184,7 @@ public:
         mDims = {SIZE_0};
         mImpl = Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
             mDataType, mvActiveAreaOrigin, mDims);
-        copyData(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0);
+        copyFromHost(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0);
     }
 
     template<typename T, std::size_t SIZE_0>
@@ -192,7 +196,7 @@ public:
             mImpl = Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
                 mDataType, mvActiveAreaOrigin, mDims);
         }
-        copyData(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0);
+        copyFromHost(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0);
         return *this;
     }
 
@@ -213,7 +217,7 @@ public:
         mSize(SIZE_0 * SIZE_1),
         mSizeM1(SIZE_1)
     {
-        copyData(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0 * SIZE_1);
+        copyFromHost(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0 * SIZE_1);
     }
 
     template<typename T, std::size_t SIZE_0, std::size_t SIZE_1>
@@ -225,7 +229,7 @@ public:
             mImpl = Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
                 mDataType, mvActiveAreaOrigin, mDims);
         }
-        copyData(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0 * SIZE_1);
+        copyFromHost(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0 * SIZE_1);
         return *this;
     }
 
@@ -247,7 +251,8 @@ public:
         mSize(SIZE_0 * SIZE_1 * SIZE_2),
         mSizeM1(SIZE_1 * SIZE_2)
     {
-        copyData(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0 * SIZE_1 * SIZE_2);
+        copyFromHost(
+            reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0 * SIZE_1 * SIZE_2);
     }
 
     template<typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
@@ -259,7 +264,8 @@ public:
             mImpl = Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
                 mDataType, mvActiveAreaOrigin, mDims);
         }
-        copyData(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0 * SIZE_1 * SIZE_2);
+        copyFromHost(
+            reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0 * SIZE_1 * SIZE_2);
         return *this;
     }
 
@@ -287,7 +293,7 @@ public:
         mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3),
         mSizeM1(SIZE_1 * SIZE_2 * SIZE_3)
     {
-        copyData(
+        copyFromHost(
             reinterpret_cast<Byte_t const *>(&(arr.data)),
             SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
     }
@@ -306,7 +312,7 @@ public:
             mImpl = Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
                 mDataType, mvActiveAreaOrigin, mDims);
         }
-        copyData(
+        copyFromHost(
             reinterpret_cast<Byte_t const *>(&(arr.data)),
             SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
         return *this;
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 719b7a57d8b36973c6b44c82bd7f9a128851940d..ed9db85e9f991c8607974a4a9a9c17652a70b478 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -50,7 +50,7 @@ void addCtor(py::class_<
             if (availableBackends.find("cpu") != availableBackends.end())
             {
                 newTensor->setBackend("cpu");
-                newTensor->getImpl().copy(
+                newTensor->getImpl().copyFromHost(
                     reinterpret_cast<Byte_t*>(info.ptr), newTensor->size());
             }
             else
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 8b7f3d54bac08c43616e1fa1582ea49337f7bd7b..f2364ce312b74238aa7593697b1b6d4681740a01 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -93,10 +93,11 @@ void Tensor::setBackend(const std::string &name)
                 {name, mDataType})(mDataType, mvActiveAreaOrigin, mDims);
             if (hasData())
             {
+                /// @todo FIXME
                 assert(false && "So far copy between different backend is not supported");
                 // _any getMemoryLocation();
                 Byte_t const *data = getDataAddress();
-                newImpl->copy(
+                newImpl->copyFromHost(
                     data, size()); // /!\ it does not cast data but reinterpret them
             }
             mImpl = std::move(newImpl);
@@ -110,9 +111,9 @@ void Tensor::setBackend(const std::string &name)
 }
 
 ///@brief Copy data from a raw buffer inside the Tensor data implementation
-void Tensor::copyData(Byte_t const *const srcPtr, std::size_t const Bytes)
+void Tensor::copyFromHost(Byte_t const *const srcPtr, std::size_t const Bytes)
 {
-    mImpl->copy(srcPtr, Bytes);
+    mImpl->copyFromHost(srcPtr, Bytes);
 }
 
 void Tensor::getCoord(
@@ -153,8 +154,7 @@ Tensor &Tensor::operator=(const Tensor &t)
     setDatatype(t.dataType());
     if (t.hasData())
     {
-        setBackend(t.mImpl->backend());
-        mImpl->copy(t.getDataAddress(), size());
+        mImpl = t.getImpl().Clone();
     }
     else
     {
@@ -175,13 +175,15 @@ void Tensor::setDatatype(const DataType dt)
             {mImpl->backend(), dt})(mDataType, mvActiveAreaOrigin, mDims);
         if (hasData())
         {
+            /// @todo FIXME
             assert(
                 false
                 && "So far copy between different data types is not properly "
                    "implemented");
             // _any getMemoryLocation();
             Byte_t const *data = getDataAddress();
-            newImpl->copy(data, size()); // /!\ it does not cast data but reinterpret them
+            newImpl->copyFromHost(
+                data, size()); // /!\ it does not cast data but reinterpret them
         }
         mImpl = std::move(newImpl);
     }
@@ -220,7 +222,8 @@ Tensor::Tensor(const Tensor &otherTensor) :
 Tensor::Tensor(
     const Tensor &otherTensor,
     std::vector<Coord_t> const &i_FirstDataLogicalCoordinates,
-    std::vector<DimSize_t> const &i_Dimensions) :
+    std::vector<DimSize_t> const &i_Dimensions,
+    bool const i_Sharing) :
     Data(Type),
     mDataType(otherTensor.mDataType),
     mvActiveAreaOrigin(i_FirstDataLogicalCoordinates),
@@ -236,8 +239,17 @@ Tensor::Tensor(
             mvActiveAreaOrigin, mDims, otherTensor.mvActiveAreaOrigin, otherTensor.mDims)
         && "Requested active area is not available inside source Tensor");
     computeSize();
-    // now sharing data
-    mImpl = otherTensor.mImpl;
+    if (i_Sharing)
+    {
+        // now sharing data
+        mImpl = otherTensor.mImpl;
+    }
+    else
+    {
+        /// @todo WIP
+        mImpl = Registrar<Tensor>::create({otherTensor.mImpl->backend(), mDataType})(
+            mDataType, mvActiveAreaOrigin, mDims);
+    }
 }
 
 void Tensor::resize(const std::vector<DimSize_t> &dims)