diff --git a/CHANGELOG b/CHANGELOG
index 0031beb91337e681884cd5a1d8c420a099a27861..b53d52720c420583973bee58f8ba2b290f0879af 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,6 +1,8 @@
+# Version 0.4.0 (December 2024)
+
 # Version 0.2.1 (May 14, 2024)
 
-* rework export mechanism 
+* rework export mechanism
 * replace `Operator::computeOutputDims()` with `Operator::forwardDims()`
 * automatic docstring decorators for python
 * add implementation of Operators only performing data/format manipulation
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 7fa6de63f48561ef0585d5bb2f49b1a583710fb2..a34718296e4ccddbfca0b4eb0daf14b08124389a 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -20,6 +20,7 @@
 
 #include "aidge/data/half.hpp"
 #include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
 enum class DataType {
@@ -91,7 +92,19 @@ DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataForm
 
 class Data {
 public:
+    Data() = delete;
+    Data(Data&& other) = default;
+    Data(const Data& other) = default;
     Data(const std::string& type): mType(type) {};
+
+    Data& operator=(const Data& other) {
+        AIDGE_ASSERT(other.mType == mType, "Cannot copy a Data object of a different type.");
+        return *this;
+    }
+    Data& operator=(Data&& other) {
+        AIDGE_ASSERT(other.mType == mType, "Cannot move a Data object of a different type.");
+        return *this;
+    }
     constexpr const std::string& type() const {
         return mType;
     }
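
In practice, the new guard only permits assignment between Data objects that report the same type string; the Data part itself copies nothing and leaves derived classes to handle their own members. A minimal sketch using Tensor, which derives from Data (values are illustrative):

    Tensor a = Array1D<int, 2>{{1, 2}};
    Tensor b;
    b = a;  // fine: both report type "Tensor", the Data part of the assignment is a no-op
    // assigning through Data& between objects with different mType would trip the AIDGE_ASSERT above
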
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index cfd54e9aa64a0ad6b5165024284b0e3431cab28c..627a5a4784b4e6546cdfc96b65acbe2a39ee119c 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -23,6 +23,8 @@
 #include <type_traits>  // std::is_arithmetic
 #include <vector>
 
+#include <fmt/core.h>
+
 #include "aidge/backend/TensorImpl.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
@@ -212,14 +214,13 @@ class Tensor : public Data,
 
     /**
      * @brief Copy dimensions, datatype and data from another Tensor.
-     * If current Tensor already has an implementation, data is copied to the
-     * existing implementation. Tensor backend/device remain untouched.
-     * If current Tensor does not have an implementation, only a shallow copy
-     * is performed and the Tensor will share data with t.
+     * The Tensor backend/device are also copied and only a shallow copy of the
+     * data is performed: the implementation is shared with the original Tensor.
      * @param other other Tensor object.
      * @return Tensor&
      */
-    Tensor &operator=(const Tensor& other);
+    Tensor &operator=(const Tensor& other) = default;
+    Tensor &operator=(Tensor&& other) = default;
 
     template <typename T>
     constexpr Tensor &operator=(Vector<T> &&arr) {
@@ -273,6 +274,17 @@ class Tensor : public Data,
      * @return Tensor
      */
     Tensor operator+(const Tensor& other) const;
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    Tensor operator+(T val) const { return *this + Tensor(val); }
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    friend Tensor operator+(T val, const Tensor& other) { return other + val; }
+
+    Tensor& operator+=(const Tensor& other);
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    Tensor& operator+=(T val) { return *this += Tensor(val); }
 
     /**
      * @brief Element-wise subtraction operation for two ``Tensor``s.
@@ -284,6 +296,17 @@ class Tensor : public Data,
      * @return Tensor
      */
     Tensor operator-(const Tensor& other) const;
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor operator-(T val) const { return *this - Tensor(val); }
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    friend inline Tensor operator-(T val, const Tensor& other) { return other - val; }
+
+    Tensor& operator-=(const Tensor& other);
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor& operator-=(T val) { return *this -= Tensor(val); }
 
     /**
      * @brief Element-wise multiplication operation for two ``Tensor``s.
@@ -295,6 +318,17 @@ class Tensor : public Data,
      * @return Tensor
      */
     Tensor operator*(const Tensor& other) const;
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor operator*(T val) const { return *this * Tensor(val); }
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    friend inline Tensor operator*(T val, const Tensor& other) { return other * val; }
+
+    Tensor& operator*=(const Tensor& other);
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor& operator*=(T val) { return *this *= Tensor(val); }
 
     /**
      * @brief Element-wise division operation for two ``Tensor``s.
@@ -306,6 +340,14 @@ class Tensor : public Data,
      * @return Tensor
      */
     Tensor operator/(const Tensor& other) const;
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor operator/(T val) const { return *this / Tensor(val); }
+
+    Tensor& operator/=(const Tensor& other);
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor& operator/=(T val) { return *this /= Tensor(val); }
 
     /**
      * @brief Element-wise sqrt operation for Tensor.
@@ -332,14 +374,17 @@ public:
      * @brief Perform a deep copy of the tensor.
     */
     Tensor clone() const {
-        Tensor newTensor(*this);
-        if (!newTensor.isContiguous()) {
-            newTensor.makeContiguous();
-        }
-        else {
-            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
-            newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
-            newTensor.setImpl(newImpl);
+        Tensor newTensor(*this); // shallow copy
+        // deep copy the implementation, if any
+        if (newTensor.hasImpl()) {
+            if (!newTensor.isContiguous()) {
+                newTensor.makeContiguous();
+            }
+            else {
+                std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
+                newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
+                newTensor.setImpl(newImpl);
+            }
         }
         return newTensor;
     }
@@ -925,4 +970,17 @@ private:
 };
 }  // namespace Aidge
 
+template<>
+struct fmt::formatter<Aidge::Tensor> {
+    template<typename ParseContext>
+    inline constexpr auto parse(ParseContext& ctx) {
+        return ctx.begin();
+    }
+
+    template<typename FormatContext>
+    inline auto format(Aidge::Tensor const& t, FormatContext& ctx) const {
+        return fmt::format_to(ctx.out(), "{}", t.toString());
+    }
+};
+
 #endif /* AIDGE_CORE_DATA_TENSOR_H_ */
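
A short usage sketch of the new scalar overloads and the fmt formatter (illustrative; it assumes a registered "cpu" backend, e.g. aidge_backend_cpu, for the element-wise kernels):

    Tensor t = Array1D<float, 3>{{1.0f, 2.0f, 3.0f}};
    t.setBackend("cpu");

    Tensor u = t + 1.0f;    // Tensor op scalar: wraps the scalar in Tensor(val)
    Tensor v = 2.0f * t;    // scalar op Tensor: friend overload, forwards to t * 2.0f
    t *= 0.5f;              // in-place: runs Mul_Op with the output bound to t

    fmt::print("{}\n", u);  // uses the fmt::formatter<Aidge::Tensor> above (prints t.toString())
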
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 4ef39f63a2f9af34cd3fe28b01cf2fc195bdfc6e..9465667babb0978e33de3cf9f155d9b2e9d495b4 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -23,6 +23,9 @@
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
+// Caution: This operator is now deprecated and should no longer be used.
+// It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
+
 namespace Aidge {
 enum class ScalingAttr {
     ScalingFactor, QuantizedNbBits, IsOutputUnsigned
diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp
index e287db4e8724f0388c13d438fc2e152fe69021cd..b5601f84c60b81d8c560b61b06c00673f51f4eee 100644
--- a/include/aidge/utils/TensorUtils.hpp
+++ b/include/aidge/utils/TensorUtils.hpp
@@ -44,6 +44,7 @@ bool approxEq(const Tensor& t1, const Tensor& t2, float relative = 1e-5f, float
     }
     for(size_t i = 0; i < t1.size(); ++i){
         if (static_cast<float>(std::abs(t1.get<T1>(i) - t2.get<T2>(i))) > (absolute + (relative * static_cast<float>(std::abs(t2.get<T2>(i)))))){
+            fmt::print("t1:\n{}\nt2:\n{}\nat index {}: {} != {}\n", t1, t2, i, t1.get<T1>(i), t2.get<T2>(i));
             return false;
         }
     }
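
For reference, the tolerance rule checked in the loop above, shown as a small sketch (the T1/T2 template parameters and default tolerances come from this header; the values are illustrative):

    Tensor a = Array1D<float, 2>{{1.0f, 2.0f}};
    Tensor b = Array1D<float, 2>{{1.0f, 2.1f}};
    // element i passes when |a[i] - b[i]| <= absolute + relative * |b[i]|
    bool ok = approxEq<float, float>(a, b);  // false: element 1 differs by 0.1
    // on failure, the new fmt::print reports both tensors and the first offending index
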
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index ce70a4d7a6f2d5acc1bb69ba43ba7509c074a99a..ded3b54088e6d1ed473ed614e23fc08cd89a0346 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -37,6 +37,7 @@ void init_Operator(py::module& m){
     py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
     .def("__repr__", &Operator::repr)
     .def("backend", &Operator::backend)
+    .def("clone", &Operator::clone)
     .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput, py::const_), py::arg("outputIdx"), py::arg("data"))
     .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
     .def("get_raw_output", &Operator::getRawOutput, py::arg("outputIdx"))
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index e6f6cd799b48991556b8c99006ab94583459117c..c834167abe15fb8a7ce96053a87a958b7515fe17 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -44,7 +44,24 @@ Tensor Tensor::operator+(const Tensor& other) const {
     add_.setBackend(mImpl->backend());
     add_.forward();
     // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-    return add_.getOutput(0)->clone();
+    return *add_.getOutput(0);
+}
+
+Tensor& Tensor::operator+=(const Tensor& other) {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto add_ = Add_Op();
+    const auto thisPtr = std::make_shared<Tensor>(*this);
+    add_.associateInput(0, thisPtr);
+    add_.associateInput(1, std::make_shared<Tensor>(other));
+    add_.setOutput(0, thisPtr);
+    add_.setDataType(dataType());
+    add_.setDataFormat(dataFormat());
+    add_.setBackend(mImpl->backend());
+    add_.forward();
+    return *this;
 }
 
 
@@ -61,7 +78,25 @@ Tensor Tensor::operator-(const Tensor& other) const {
     sub_.setBackend(mImpl->backend());
     sub_.forward();
     // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-    return sub_.getOutput(0)->clone();
+    return *sub_.getOutput(0);
+}
+
+Tensor& Tensor::operator-=(const Tensor& other) {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto sub_ = Sub_Op();
+    const auto thisPtr = std::make_shared<Tensor>(*this);
+    sub_.associateInput(0, thisPtr);
+    sub_.associateInput(1, std::make_shared<Tensor>(other));
+    sub_.setOutput(0, thisPtr);
+    sub_.setDataType(dataType());
+    sub_.setDataFormat(dataFormat());
+    sub_.setBackend(mImpl->backend());
+    sub_.forward();
+    return *this;
 }
 
 
@@ -81,6 +116,24 @@ Tensor Tensor::operator*(const Tensor& other) const {
     return mul_.getOutput(0)->clone();
 }
 
+Tensor& Tensor::operator*=(const Tensor& other) {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto mul_ = Mul_Op();
+    const auto thisPtr = std::make_shared<Tensor>(*this);
+    mul_.associateInput(0, thisPtr);
+    mul_.associateInput(1, std::make_shared<Tensor>(other));
+    mul_.setOutput(0, thisPtr);
+    mul_.setDataType(dataType());
+    mul_.setDataFormat(dataFormat());
+    mul_.setBackend(mImpl->backend());
+    mul_.forward();
+    return *this;
+}
+
 
 Tensor Tensor::operator/(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
@@ -98,6 +151,24 @@ Tensor Tensor::operator/(const Tensor& other) const {
     return div_.getOutput(0)->clone();
 }
 
+Tensor& Tensor::operator/=(const Tensor& other) {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto div_ = Div_Op();
+    const auto thisPtr = std::make_shared<Tensor>(*this);
+    div_.associateInput(0, thisPtr);
+    div_.associateInput(1, std::make_shared<Tensor>(other));
+    div_.setOutput(0, thisPtr);
+    div_.setDataType(dataType());
+    div_.setDataFormat(dataFormat());
+    div_.setBackend(mImpl->backend());
+    div_.forward();
+    return *this;
+}
+
 Tensor Tensor::sqrt() const {
     AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
     auto sqrt_ = Sqrt_Op();
@@ -135,24 +206,24 @@ Tensor Tensor::mean() const {
     return mean_.getOutput(0)->clone();
 }
 
-Tensor& Tensor::operator=(const Tensor& other) {
-    if (this == &other) {
-        return *this;
-    }
-    resize(other.dims(), other.strides());
-    setDataType(other.dataType(), false);  // do not convert existing data
-    if (other.hasImpl()) {
-        if (hasImpl()) {
-            copyFrom(other);
-        } else {
-            // Perform a shallow copy only
-            setImpl(other.mImpl, other.mImplOffset);
-        }
-    } else {
-        setImpl(nullptr);
-    }
-    return *this;
-}
 
 
 void Tensor::setBackend(const std::string &name, DeviceIdx_t device, bool copyFrom) {
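
With operator= now defaulted, assignment shares the underlying implementation, and clone() remains the way to obtain an independent copy (the new unit test below exercises the same behaviour). A minimal sketch, values illustrative:

    Tensor a = Array1D<int, 2>{{1, 2}};
    Tensor b;
    b = a;                  // shallow: b shares a's implementation
    a.set<int>(0, 10);      // the change is visible through b as well
    Tensor c = a.clone();   // deep copy: c owns its own storage
    a.set<int>(0, 42);      // c still reads 10 at index 0
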
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index cd307c9d15043d3ee5f5de48695e04e4ad2ada6b..ae3c3ed6ca85c059204c524f467f5387f656e30b 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -96,7 +96,9 @@ void Aidge::MetaOperator_Op::setBackend(const std::string &name, Aidge::DeviceId
         for(auto i: mGraph->inputNodes()){
             auto op_i = std::static_pointer_cast<OperatorTensor>(i->getOperator());
             for(std::size_t in_idx=0; in_idx < op_i->nbInputs(); ++in_idx){
-                op_i->getInput(in_idx)->setBackend(name, device);
+                if (op_i->getInput(in_idx)) {
+                    op_i->getInput(in_idx)->setBackend(name, device);
+                }
             }
         }
         for(auto o: mGraph->outputNodes()){
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index a27e2745b8929e84456ac079d063d94ffa359679..fa77d18e7e3c5b30466304e04cf2ad95affce20e 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -33,7 +33,7 @@ void Aidge::Pop_OpImpl::forward() {
     const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
 
     assert(op.getInput(0) && "missing input #0");
-    *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()});
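+    // clone(): Tensor assignment is now shallow, so without it the output would alias the extracted view's storage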
+    *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()}).clone();
 }
 
 //////////////////////////////////////////////////////////
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 3d48b88ab400596d68cbfa34502e795766ff94f0..9af4586886fc98c50862672392d3b704e6bc1d0c 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -44,7 +44,7 @@ Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, boo
         attr<ProdAttr::Constant>(constant)))
 {
     mOutputs[0] = tensor; // copy the pointer of the Tensor
-    if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
+    if (mOutputs[0] && mOutputs[0]->hasImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
         SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
     }
     else {
@@ -61,7 +61,7 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
     : OperatorTensor(op),
       mAttributes(op.mAttributes)
 {
-    mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0)));
+    *mOutputs[0] = *(op.getOutput(0));
     if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
         SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
     }
@@ -71,7 +71,12 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
 }
 
 std::shared_ptr<Aidge::Operator> Aidge::Producer_Op::clone() const {
-    return std::make_shared<Producer_Op>(*this);
+    // mOutputs[0] cannot be nullptr because of the OperatorTensor constructor
+    std::shared_ptr<Tensor> newTensor = std::make_shared<Tensor>(mOutputs[0]->clone());
+
+    std::shared_ptr<Producer_Op> newOp = std::make_shared<Producer_Op>(newTensor, constant());
+
+    return newOp;
 }
 
 void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
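
A sketch of the intended clone() behaviour after this change: the clone receives a deep copy of the producer's tensor, so later writes to the original tensor no longer appear in the clone (illustrative; it assumes the single-argument Producer_Op constructor with constant defaulting to false):

    auto data = std::make_shared<Tensor>(Array1D<int, 2>{{1, 2}});
    Producer_Op prod(data);
    auto copy = std::static_pointer_cast<Producer_Op>(prod.clone());

    data->set<int>(0, 99);
    // copy->getOutput(0)->get<int>(0) is still 1: the clone owns its own storage
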
diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp
index 5ac08cd2245e0caa3ca7072c70ccc69bcfcf9558..268a14cf9759a6e03302680814778da4804dcc19 100644
--- a/src/operator/Scaling.cpp
+++ b/src/operator/Scaling.cpp
@@ -18,6 +18,10 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
+
+// Caution: This operator is now deprecated and should no longer be used.
+// It has been replaced by the MetaOperator "Quantizer" (located directly in aidge_quantization).
+
 const std::string Aidge::Scaling_Op::Type = "Scaling";
 
 Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOutputUnsigned)
@@ -26,12 +30,15 @@ Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOu
         attr<ScalingAttr::ScalingFactor>(scalingFactor),
         attr<ScalingAttr::QuantizedNbBits>(nbBits),
         attr<ScalingAttr::IsOutputUnsigned>(isOutputUnsigned)))
-{}
+{
+    Log::warn("Caution: The [Scaling] operator is now deprecated and should no longer be used.\nIt has been replaced by the MetaOperator [Quantizer] (located directly in aidge_quantization).");
+}
 
 Aidge::Scaling_Op::Scaling_Op(const Aidge::Scaling_Op& op)
     : OperatorTensor(op),
     mAttributes(op.mAttributes)
 {
+    Log::warn("Caution: The [Scaling] operator is now deprecated and should no longer be used. \nIt has been replaced by the MetaOperator [Quantizer] (located directly in aidge_quantization).");
     if (op.mImpl){
         SET_IMPL_MACRO(Scaling_Op, *this, op.backend());
     } else {
diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp
index 58003bb4009a484ca63acffdb50fbda156a48787..6c4b14602aed98ff5736d2cf30ba642f9e7ec57b 100644
--- a/unit_tests/data/Test_Tensor.cpp
+++ b/unit_tests/data/Test_Tensor.cpp
@@ -120,7 +120,27 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
         ));
     }
     SECTION("copy constructor / copy assignment operator") {
+        Tensor t1 = Array1D<int, 2>{{1, 2}};
+        Tensor t2, t3;
 
+        REQUIRE_NOTHROW(t3 = t1);
+        REQUIRE(t1 == t3);
+
+        REQUIRE_NOTHROW(t2 = Tensor(t1));
+        REQUIRE(t1 == t2);
+
+
+        t1.set<int>(0, 10);
+
+        // check copies are shallow
+        REQUIRE(t2.get<int>(0) == 10);
+        REQUIRE(t3.get<int>(0) == 10);
+
+        // set already existing Tensor
+        Tensor t4 = Array1D<int, 1>{{11}};
+        REQUIRE_NOTHROW(t4 = t1);
+        REQUIRE(t4 == t1);
+        REQUIRE(t4.size() == 2);
     }
     SECTION("move constructor / move assignment operator") {
 
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index a7d02cd2fc1f3782046f3e8a9e7d7ca00b2ec5a7..5bd435e28718d663519e504995fd5b030913d254 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -816,7 +816,7 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
     }
 }
 
-TEST_CASE("[GraphView] clone") {
+TEST_CASE("[GraphView] clone", "[GraphView][Core][Clone]") {
     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
     auto conv1 = Conv(3, 32, {3, 3}, "conv1");
     auto conv2 = Conv(32, 64, {3, 3}, "conv2");
diff --git a/unit_tests/operator/Test_PopImpl.cpp b/unit_tests/operator/Test_PopImpl.cpp
index f46131ed8c324f38874eb433f97d13977b4253a4..d3c87ef7289e4516442885f7449060055c428c49 100644
--- a/unit_tests/operator/Test_PopImpl.cpp
+++ b/unit_tests/operator/Test_PopImpl.cpp
@@ -16,21 +16,22 @@
 #include "aidge/operator/Pop.hpp"
 #include "aidge/utils/TensorUtils.hpp"
 
-using Aidge::Tensor;
-using Aidge::Pop;
+using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Pop(forward)", "[Pop][CPU]") {
-    std::shared_ptr<Tensor> pop1 = std::make_shared<Tensor>(Aidge::Array1D<int,3>{{4,5,6}});
-    std::shared_ptr<Tensor> pop2 = std::make_shared<Tensor>(Aidge::Array1D<int,3>{{1,2,3}});
-    std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Aidge::Array2D<int,2,3>{{{1,2,3}, {4,5,6}}});
+    std::shared_ptr<Tensor> pop1 = std::make_shared<Tensor>(Array1D<int,3>{{4,5,6}});
+    std::shared_ptr<Tensor> pop2 = std::make_shared<Tensor>(Array1D<int,3>{{1,2,3}});
+    std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,2,3>{{{1,2,3}, {4,5,6}}});
 
-    auto pop = Aidge::Pop("pop");
-    pop->getOperator()->associateInput(0, input);
-    pop->getOperator()->setBackend("cpu");
-    pop->getOperator()->setDataType(Aidge::DataType::Int32);
+    auto pop = Pop("pop");
+    std::shared_ptr<Pop_Op> op = std::static_pointer_cast<Pop_Op>(pop->getOperator());
+    op->associateInput(0, input);
+    op->setBackend("cpu");
+    op->setDataType(DataType::Int32);
+    op->forwardDims();
 
     REQUIRE_NOTHROW(pop->forward());
-    REQUIRE(*std::static_pointer_cast<Aidge::OperatorTensor>(pop->getOperator())->getOutput(0) == *pop2);
+    REQUIRE(*op->getOutput(0) == *pop2);
     REQUIRE_NOTHROW(pop->forward());
-    REQUIRE(*std::static_pointer_cast<Aidge::OperatorTensor>(pop->getOperator())->getOutput(0) == *pop1);
+    REQUIRE(*op->getOutput(0) == *pop1);
 }
diff --git a/version.txt b/version.txt
index 0d91a54c7d439e84e3dd17d3594f1b2b6737f430..1d0ba9ea182b0f7354f3daf12120744ec5e0c2f8 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.3.0
+0.4.0