diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index cfd54e9aa64a0ad6b5165024284b0e3431cab28c..9031d19f30e9f634269e71c0ced9d7782d801054 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -212,14 +212,13 @@ class Tensor : public Data,
 
     /**
      * @brief Copy dimensions, datatype and data from another Tensor.
-     * If current Tensor already has an implementation, data is copied to the
-     * existing implementation. Tensor backend/device remain untouched.
-     * If current Tensor does not have an implementation, only a shallow copy
-     * is performed and the Tensor will share data with t.
+     * The Tensor backend/device are also copied, and only a shallow copy
+     * is performed for the data: the implementation is shared with the original Tensor.
      * @param other other Tensor object.
      * @return Tensor&
      */
-    Tensor &operator=(const Tensor& other);
+    Tensor &operator=(const Tensor& other) = default;
+    Tensor &operator=(Tensor&& other) = default;
 
     template <typename T>
     constexpr Tensor &operator=(Vector<T> &&arr) {
@@ -332,14 +331,17 @@ public:
      * @brief Perform a deep copy of the tensor.
     */
     Tensor clone() const {
-        Tensor newTensor(*this);
-        if (!newTensor.isContiguous()) {
-            newTensor.makeContiguous();
-        }
-        else {
-            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
-            newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
-            newTensor.setImpl(newImpl);
+        Tensor newTensor(*this); // shallow copy
+        // handle deepcopy of implementation if any
+        if (newTensor.hasImpl()) {
+            if (!newTensor.isContiguous()) {
+                newTensor.makeContiguous();
+            }
+            else {
+                std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
+                newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
+                newTensor.setImpl(newImpl);
+            }
         }
         return newTensor;
     }
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index e6f6cd799b48991556b8c99006ab94583459117c..d54806948246ec97384ffcb4a43cc3c3428ce352 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -135,24 +135,24 @@ Tensor Tensor::mean() const {
     return mean_.getOutput(0)->clone();
 }
 
-Tensor& Tensor::operator=(const Tensor& other) {
-    if (this == &other) {
-        return *this;
-    }
-    resize(other.dims(), other.strides());
-    setDataType(other.dataType(), false);  // do not convert existing data
-    if (other.hasImpl()) {
-        if (hasImpl()) {
-            copyFrom(other);
-        } else {
-            // Perform a shallow copy only
-            setImpl(other.mImpl, other.mImplOffset);
-        }
-    } else {
-        setImpl(nullptr);
-    }
-    return *this;
-}
+// Tensor& Tensor::operator=(const Tensor& other) {
+//     if (this == &other) {
+//         return *this;
+//     }
+//     resize(other.dims(), other.strides());
+//     setDataType(other.dataType(), false);  // do not convert existing data
+//     if (other.hasImpl()) {
+//         if (hasImpl()) {
+//             copyFrom(other);
+//         } else {
+//             // Perform a shallow copy only
+//             setImpl(other.mImpl, other.mImplOffset);
+//         }
+//     } else {
+//         setImpl(nullptr);
+//     }
+//     return *this;
+// }
 
 
 void Tensor::setBackend(const std::string &name, DeviceIdx_t device, bool copyFrom) {