diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 7fa6de63f48561ef0585d5bb2f49b1a583710fb2..a34718296e4ccddbfca0b4eb0daf14b08124389a 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -20,6 +20,7 @@
 
 #include "aidge/data/half.hpp"
 #include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
 enum class DataType {
@@ -91,7 +92,19 @@ DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataForm
 
 class Data {
 public:
+    Data() = delete;
+    Data(Data&& other) = default;
+    Data(const Data& other) = default;
     Data(const std::string& type): mType(type) {};
+
+    Data& operator=(const Data& other) {
+        AIDGE_ASSERT(other.mType == mType, "Cannot copy a different type of Data object.");
+        return *this;
+    };
+    Data& operator=(Data&& other) {
+        AIDGE_ASSERT(other.mType == mType, "Cannot move a different type of Data object.");
+        return *this;
+    };
     constexpr const std::string& type() const {
         return mType;
     }
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index cfd54e9aa64a0ad6b5165024284b0e3431cab28c..627a5a4784b4e6546cdfc96b65acbe2a39ee119c 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -23,6 +23,8 @@
 #include <type_traits>  // std::is_arithmetic
 #include <vector>
 
+#include <fmt/core.h>
+
 #include "aidge/backend/TensorImpl.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
@@ -212,14 +214,13 @@ class Tensor : public Data,
 
     /**
      * @brief Copy dimensions, datatype and data from another Tensor.
-     * If current Tensor already has an implementation, data is copied to the
-     * existing implementation. Tensor backend/device remain untouched.
-     * If current Tensor does not have an implementation, only a shallow copy
-     * is performed and the Tensor will share data with t.
+     * Tensor backend/device are also copied and only a shallow copy
+     * is performed for data. Implementation will be shared with original Tensor.
      * @param other other Tensor object.
      * @return Tensor&
      */
-    Tensor &operator=(const Tensor& other);
+    Tensor &operator=(const Tensor& other) = default;
+    Tensor &operator=(Tensor&& other) = default;
 
     template <typename T>
     constexpr Tensor &operator=(Vector<T> &&arr) {
@@ -273,6 +274,17 @@ class Tensor : public Data,
      * @return Tensor
      */
     Tensor operator+(const Tensor& other) const;
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    Tensor operator+(T val) const { return *this + Tensor(val); }
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    friend Tensor operator+(T val, const Tensor& other) { return other + val; }
+
+    Tensor& operator+=(const Tensor& other);
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    Tensor& operator+=(T val) {return *this += Tensor(val); }
 
     /**
      * @brief Element-wise subtraction operation for two ``Tensor``s.
@@ -284,6 +296,17 @@ class Tensor : public Data,
      * @return Tensor
      */
     Tensor operator-(const Tensor& other) const;
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor operator-(T val) const { return *this - Tensor(val); }
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    friend inline Tensor operator-(T val, const Tensor& other) { return other - val; }
+
+    Tensor& operator-=(const Tensor& other);
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor& operator-=(T val) {return *this -= Tensor(val); }
 
     /**
      * @brief Element-wise multiplication operation for two ``Tensor``s.
@@ -295,6 +318,17 @@ class Tensor : public Data,
      * @return Tensor
      */
     Tensor operator*(const Tensor& other) const;
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor operator*(T val) const { return *this * Tensor(val); }
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    friend inline Tensor operator*(T val, const Tensor& other) { return other * val; }
+
+    Tensor& operator*=(const Tensor& other);
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor& operator*=(T val) {return *this *= Tensor(val); }
 
     /**
      * @brief Element-wise division operation for two ``Tensor``s.
@@ -306,6 +340,14 @@ class Tensor : public Data,
      * @return Tensor
      */
     Tensor operator/(const Tensor& other) const;
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor operator/(T val) const { return *this / Tensor(val); }
+
+    Tensor& operator/=(const Tensor& other);
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    inline Tensor& operator/=(T val) {return *this /= Tensor(val); }
 
     /**
      * @brief Element-wise sqrt operation for Tensor.
@@ -332,14 +374,17 @@ public:
      * @brief Perform a deep copy of the tensor.
     */
     Tensor clone() const {
-        Tensor newTensor(*this);
-        if (!newTensor.isContiguous()) {
-            newTensor.makeContiguous();
-        }
-        else {
-            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
-            newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
-            newTensor.setImpl(newImpl);
+        Tensor newTensor(*this); // shallow copy
+        // handle deepcopy of implementation if any
+        if (newTensor.hasImpl()) {
+            if (!newTensor.isContiguous()) {
+                newTensor.makeContiguous();
+            }
+            else {
+                std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
+                newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
+                newTensor.setImpl(newImpl);
+            }
         }
         return newTensor;
     }
@@ -925,4 +970,17 @@ private:
 };
 }  // namespace Aidge
 
+template<>
+struct fmt::formatter<Aidge::Tensor> {
+    template<typename ParseContext>
+    inline constexpr auto parse(ParseContext& ctx) {
+        return ctx.begin();
+    }
+
+    template<typename FormatContext>
+    inline auto format(Aidge::Tensor const& t, FormatContext& ctx) const {
+        return fmt::format_to(ctx.out(), "{}", t.toString());
+    }
+};
+
 #endif /* AIDGE_CORE_DATA_TENSOR_H_ */
diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp
index e287db4e8724f0388c13d438fc2e152fe69021cd..b5601f84c60b81d8c560b61b06c00673f51f4eee 100644
--- a/include/aidge/utils/TensorUtils.hpp
+++ b/include/aidge/utils/TensorUtils.hpp
@@ -44,6 +44,7 @@ bool approxEq(const Tensor& t1, const Tensor& t2, float relative = 1e-5f, float
     }
     for(size_t i = 0; i < t1.size(); ++i){
         if (static_cast<float>(std::abs(t1.get<T1>(i) - t2.get<T2>(i))) > (absolute + (relative * static_cast<float>(std::abs(t2.get<T2>(i)))))){
+            fmt::print("t1:\n{}\nt2:\n{}\nat index {} {} != {}\n", t1, t2, i, t1.get<T1>(i), t2.get<T2>(i));
             return false;
         }
     }
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index a1d1889c9a1881d3aa7b6eb9ccb4c23c5314cc80..7fa9e5825983eb0c82d2b1f84b77557e656a7d78 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -37,6 +37,7 @@ void init_Operator(py::module& m){
     py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
     .def("__repr__", &Operator::repr)
     .def("backend", &Operator::backend)
+    .def("clone", &Operator::clone)
     .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput, py::const_), py::arg("outputIdx"), py::arg("data"))
     .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
     .def("get_raw_output", &Operator::getRawOutput, py::arg("outputIdx"))
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index e6f6cd799b48991556b8c99006ab94583459117c..c834167abe15fb8a7ce96053a87a958b7515fe17 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -44,7 +44,24 @@ Tensor Tensor::operator+(const Tensor& other) const {
     add_.setBackend(mImpl->backend());
     add_.forward();
     // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-    return add_.getOutput(0)->clone();
+    return *add_.getOutput(0);
+}
+
+Tensor& Tensor::operator+=(const Tensor& other) {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto add_ = Add_Op();
+    const auto thisPtr = std::make_shared<Tensor>(*this);
+    add_.associateInput(0, thisPtr);
+    add_.associateInput(1, std::make_shared<Tensor>(other));
+    add_.setOutput(0, thisPtr);
+    add_.setDataType(dataType());
+    add_.setDataFormat(dataFormat());
+    add_.setBackend(mImpl->backend());
+    add_.forward();
+    return *this;
 }
 
 
@@ -61,7 +78,25 @@ Tensor Tensor::operator-(const Tensor& other) const {
     sub_.setBackend(mImpl->backend());
     sub_.forward();
     // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-    return sub_.getOutput(0)->clone();
+    return *sub_.getOutput(0);
+}
+
+Tensor& Tensor::operator-=(const Tensor& other) {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto sub_ = Sub_Op();
+    const auto thisPtr = std::make_shared<Tensor>(*this);
+    sub_.associateInput(0, thisPtr);
+    sub_.associateInput(1, std::make_shared<Tensor>(other));
+    sub_.setOutput(0, thisPtr);
+    sub_.setDataType(dataType());
+    sub_.setDataFormat(dataFormat());
+    sub_.setBackend(mImpl->backend());
+    sub_.forward();
+    // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
+    return *this;
 }
 
 
@@ -81,6 +116,24 @@ Tensor Tensor::operator*(const Tensor& other) const {
     return mul_.getOutput(0)->clone();
 }
 
+Tensor& Tensor::operator*=(const Tensor& other) {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto mul_ = Mul_Op();
+    const auto thisPtr = std::make_shared<Tensor>(*this);
+    mul_.associateInput(0, thisPtr);
+    mul_.associateInput(1, std::make_shared<Tensor>(other));
+    mul_.setOutput(0, thisPtr);
+    mul_.setDataType(dataType());
+    mul_.setDataFormat(dataFormat());
+    mul_.setBackend(mImpl->backend());
+    mul_.forward();
+    // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
+    return *this;
+}
+
 
 Tensor Tensor::operator/(const Tensor& other) const {
     AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
@@ -98,6 +151,24 @@ Tensor Tensor::operator/(const Tensor& other) const {
     return div_.getOutput(0)->clone();
 }
 
+Tensor& Tensor::operator/=(const Tensor& other) {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto div_ = Div_Op();
+    const auto thisPtr = std::make_shared<Tensor>(*this);
+    div_.associateInput(0, thisPtr);
+    div_.associateInput(1, std::make_shared<Tensor>(other));
+    div_.setOutput(0, thisPtr);
+    div_.setDataType(dataType());
+    div_.setDataFormat(dataFormat());
+    div_.setBackend(mImpl->backend());
+    div_.forward();
+    // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
+    return *this;
+}
+
 Tensor Tensor::sqrt() const {
     AIDGE_ASSERT(hasImpl(), "Tensor has no implementation.");
     auto sqrt_ = Sqrt_Op();
@@ -135,24 +206,24 @@ Tensor Tensor::mean() const {
     return mean_.getOutput(0)->clone();
 }
 
-Tensor& Tensor::operator=(const Tensor& other) {
-    if (this == &other) {
-        return *this;
-    }
-    resize(other.dims(), other.strides());
-    setDataType(other.dataType(), false);  // do not convert existing data
-    if (other.hasImpl()) {
-        if (hasImpl()) {
-            copyFrom(other);
-        } else {
-            // Perform a shallow copy only
-            setImpl(other.mImpl, other.mImplOffset);
-        }
-    } else {
-        setImpl(nullptr);
-    }
-    return *this;
-}
+// Tensor& Tensor::operator=(const Tensor& other) {
+//     if (this == &other) {
+//         return *this;
+//     }
+//     resize(other.dims(), other.strides());
+//     setDataType(other.dataType(), false);  // do not convert existing data
+//     if (other.hasImpl()) {
+//         if (hasImpl()) {
+//         //     copyFrom(other);
+//         // } else {
+//             // Perform a shallow copy only
+//             setImpl(other.mImpl, other.mImplOffset);
+//         }
+//     } else {
+//         setImpl(nullptr);
+//     }
+//     return *this;
+// }
 
 
 void Tensor::setBackend(const std::string &name, DeviceIdx_t device, bool copyFrom) {
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index a27e2745b8929e84456ac079d063d94ffa359679..fa77d18e7e3c5b30466304e04cf2ad95affce20e 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -33,7 +33,7 @@ void Aidge::Pop_OpImpl::forward() {
     const Pop_Op& op = dynamic_cast<const Pop_Op&>(mOp);
 
     assert(op.getInput(0) && "missing input #0");
-    *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()});
+    *op.getOutput(0) = op.getInput(0)->extract({op.forwardStep()}).clone();
 }
 
 //////////////////////////////////////////////////////////
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 3d48b88ab400596d68cbfa34502e795766ff94f0..9af4586886fc98c50862672392d3b704e6bc1d0c 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -44,7 +44,7 @@ Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, boo
         attr<ProdAttr::Constant>(constant)))
 {
     mOutputs[0] = tensor; // copy the pointer of the Tensor
-    if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
+    if (mOutputs[0] && mOutputs[0]->hasImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
         SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
     }
     else {
@@ -61,7 +61,7 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
     : OperatorTensor(op),
       mAttributes(op.mAttributes)
 {
-    mOutputs[0] = std::make_shared<Tensor>(*(op.getOutput(0)));
+    *mOutputs[0] = *(op.getOutput(0));
     if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
         SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
     }
@@ -71,7 +71,12 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
 }
 
 std::shared_ptr<Aidge::Operator> Aidge::Producer_Op::clone() const {
-    return std::make_shared<Producer_Op>(*this);
+    // mOutput cannot be nullptr because of OperatorTensor constructor
+    std::shared_ptr<Tensor> newTensor = std::make_shared<Tensor>(mOutputs[0]->clone());
+
+    std::shared_ptr<Producer_Op> newOp = std::make_shared<Producer_Op>(newTensor, constant());
+
+    return newOp;
 }
 
 void Aidge::Producer_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp
index 58003bb4009a484ca63acffdb50fbda156a48787..6c4b14602aed98ff5736d2cf30ba642f9e7ec57b 100644
--- a/unit_tests/data/Test_Tensor.cpp
+++ b/unit_tests/data/Test_Tensor.cpp
@@ -120,7 +120,27 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
         ));
     }
     SECTION("copy constructor / copy assignment operator") {
+        Tensor t1 = Array1D<int, 2>{{1, 2}};
+        Tensor t2, t3;
 
+        REQUIRE_NOTHROW(t3 = t1);
+        REQUIRE(t1 == t3);
+
+        REQUIRE_NOTHROW(t2 = Tensor(t1));
+        REQUIRE(t1 == t2);
+
+
+        t1.set<int>(0, 10);
+
+        // check copies are shallow
+        REQUIRE(t2.get<int>(0) == 10);
+        REQUIRE(t3.get<int>(0) == 10);
+
+        // set already existing Tensor
+        Tensor t4 = Array1D<int, 1>{{11}};
+        REQUIRE_NOTHROW(t4 = t1);
+        REQUIRE(t4 == t1);
+        REQUIRE(t4.size() == 2);
     }
     SECTION("move constructor / move assignment operator") {
 
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index a7d02cd2fc1f3782046f3e8a9e7d7ca00b2ec5a7..5bd435e28718d663519e504995fd5b030913d254 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -816,7 +816,7 @@ TEST_CASE("[core/graph] GraphView(replace)", "[GraphView][replace]") {
     }
 }
 
-TEST_CASE("[GraphView] clone") {
+TEST_CASE("[GraphView] clone", "[GraphView][Core][Clone]") {
     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
     auto conv1 = Conv(3, 32, {3, 3}, "conv1");
     auto conv2 = Conv(32, 64, {3, 3}, "conv2");
diff --git a/unit_tests/operator/Test_PopImpl.cpp b/unit_tests/operator/Test_PopImpl.cpp
index f46131ed8c324f38874eb433f97d13977b4253a4..d3c87ef7289e4516442885f7449060055c428c49 100644
--- a/unit_tests/operator/Test_PopImpl.cpp
+++ b/unit_tests/operator/Test_PopImpl.cpp
@@ -16,21 +16,22 @@
 #include "aidge/operator/Pop.hpp"
 #include "aidge/utils/TensorUtils.hpp"
 
-using Aidge::Tensor;
-using Aidge::Pop;
+using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Pop(forward)", "[Pop][CPU]") {
-    std::shared_ptr<Tensor> pop1 = std::make_shared<Tensor>(Aidge::Array1D<int,3>{{4,5,6}});
-    std::shared_ptr<Tensor> pop2 = std::make_shared<Tensor>(Aidge::Array1D<int,3>{{1,2,3}});
-    std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Aidge::Array2D<int,2,3>{{{1,2,3}, {4,5,6}}});
+    std::shared_ptr<Tensor> pop1 = std::make_shared<Tensor>(Array1D<int,3>{{4,5,6}});
+    std::shared_ptr<Tensor> pop2 = std::make_shared<Tensor>(Array1D<int,3>{{1,2,3}});
+    std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,2,3>{{{1,2,3}, {4,5,6}}});
 
-    auto pop = Aidge::Pop("pop");
-    pop->getOperator()->associateInput(0, input);
-    pop->getOperator()->setBackend("cpu");
-    pop->getOperator()->setDataType(Aidge::DataType::Int32);
+    auto pop = Pop("pop");
+    std::shared_ptr<Pop_Op> op = std::static_pointer_cast<Pop_Op>(pop->getOperator());
+    op->associateInput(0, input);
+    op->setBackend("cpu");
+    op->setDataType(DataType::Int32);
+    op->forwardDims();
 
     REQUIRE_NOTHROW(pop->forward());
-    REQUIRE(*std::static_pointer_cast<Aidge::OperatorTensor>(pop->getOperator())->getOutput(0) == *pop2);
+    REQUIRE(*op->getOutput(0) == *pop2);
     REQUIRE_NOTHROW(pop->forward());
-    REQUIRE(*std::static_pointer_cast<Aidge::OperatorTensor>(pop->getOperator())->getOutput(0) == *pop1);
+    REQUIRE(*op->getOutput(0) == *pop1);
 }