diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 1f9c5a5ec14cca4469b0329f2f968cf9dbc7b0de..a5d59e3894cb22f059a4116a38294098024ab371 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -24,6 +24,10 @@
 
 #include "aidge/backend/TensorImpl.hpp"
 #include "aidge/data/Data.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Div.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/Sub.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ArrayHelpers.hpp"
@@ -231,6 +235,102 @@ class Tensor : public Data,
         return *mImpl == *(otherTensor.mImpl);
     }
 
+    /**
+     * @brief Element-wise addition operation for two ``Tensor``s.
+     * @note ``Tensor``s should be stored on the same backend.
+     * @todo If the input ``Tensor``s have different dataTypes, the output should
+     * have the dataType of the ``Tensor`` with the highest precision.
+     *
+     * @param other Tensor to add element-wise to the current one.
+     * @return Tensor Result of the element-wise addition.
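+     *
+     * Usage sketch (illustrative; ``a`` and ``b`` are assumed to already share
+     * the same backend and dataType):
+     * @code
+     * Tensor c = a + b;   // element-wise sum, returned as a new Tensor
+     * @endcode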
+     */
+    Tensor operator+(const Tensor& other) const {
+        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "Cannot perform a binary operation: at least one of the Tensors has no implementation.");
+        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
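+        // Run a one-shot Add operator on the two tensors and return a deep copy of its output.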
+        auto add_ = Add_Op(2);
+        add_.associateInput(0, std::make_shared<Tensor>(*this));
+        add_.associateInput(1, std::make_shared<Tensor>(other));
+        add_.computeOutputDims();
+        add_.setDataType(dataType());
+        add_.setBackend(mImpl->backend());
+        add_.forward();
+        return add_.getOutput(0)->clone();
+    }
+
+    /**
+     * @brief Element-wise subtraction operation for two ``Tensor``s.
+     * @note ``Tensor``s should be stored on the same backend.
+     * @todo If the input ``Tensor``s have different dataTypes, the output should
+     * have the dataType of the ``Tensor`` with the highest precision.
+     *
+     * @param other Tensor to subtract element-wise from the current one.
+     * @return Tensor Result of the element-wise subtraction.
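+     *
+     * Usage sketch (illustrative; operands are assumed to share backend and dataType):
+     * @code
+     * Tensor c = a - b;   // element-wise difference, returned as a new Tensor
+     * @endcode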
+     */
+    Tensor operator-(const Tensor& other) const {
+        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "Cannot perform a binary operation: at least one of the Tensors has no implementation.");
+        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+        auto sub_ = Sub_Op();
+        sub_.associateInput(0, std::make_shared<Tensor>(*this));
+        sub_.associateInput(1, std::make_shared<Tensor>(other));
+        sub_.computeOutputDims();
+        sub_.setDataType(dataType());
+        sub_.setBackend(mImpl->backend());
+        sub_.forward();
+        return sub_.getOutput(0)->clone();
+    }
+
+    /**
+     * @brief Element-wise multiplication operation for two ``Tensor``s.
+     * @note ``Tensor``s should be stored on the same backend.
+     * @todo If the input ``Tensor``s have different dataTypes, the output should
+     * have the dataType of the ``Tensor`` with the highest precision.
+     *
+     * @param other Tensor to multiply element-wise with the current one.
+     * @return Tensor Result of the element-wise multiplication.
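+     *
+     * Usage sketch (illustrative; operands are assumed to share backend and dataType):
+     * @code
+     * Tensor c = a * b;   // element-wise product, returned as a new Tensor
+     * @endcode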
+     */
+    Tensor operator*(const Tensor& other) const {
+        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "Cannot perform a binary operation: at least one of the Tensors has no implementation.");
+        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+        auto mul_ = Mul_Op();
+        mul_.associateInput(0, std::make_shared<Tensor>(*this));
+        mul_.associateInput(1, std::make_shared<Tensor>(other));
+        mul_.computeOutputDims();
+        mul_.setDataType(dataType());
+        mul_.setBackend(mImpl->backend());
+        mul_.forward();
+        return mul_.getOutput(0)->clone();
+    }
+
+    /**
+     * @brief Element-wise division operation for two ``Tensor``s.
+     * @note ``Tensor``s should be stored on the same backend.
+     * @todo If the input ``Tensor``s have different dataTypes, the output should
+     * have the dataType of the ``Tensor`` with the highest precision.
+     *
+     * @param other Tensor to divide the current one by, element-wise.
+     * @return Tensor Result of the element-wise division.
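+     *
+     * Usage sketch (illustrative; operands are assumed to share backend and dataType):
+     * @code
+     * Tensor c = a / b;   // element-wise quotient, returned as a new Tensor
+     * @endcode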
+     */
+    Tensor operator/(const Tensor& other) const {
+        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "Cannot perform a binary operation: at least one of the Tensors has no implementation.");
+        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+        auto div_ = Div_Op();
+        div_.associateInput(0, std::make_shared<Tensor>(*this));
+        div_.associateInput(1, std::make_shared<Tensor>(other));
+        div_.computeOutputDims();
+        div_.setDataType(dataType());
+        div_.setBackend(mImpl->backend());
+        div_.forward();
+        return div_.getOutput(0)->clone();
+    }
+
 public:
     /**
      * @brief Perform a deep copy of the tensor.
@@ -461,6 +561,26 @@ public:
         return mGrad;
     }
 
+    /**
+     * @brief Associate the gradient with a Tensor instance and set its implementation
+     * if none was previously set.
+     * @note Dimensions for the Tensor instance are copied from the current Tensor.
+     * @note If a Tensor instance was already associated, only the implementation is created,
+     * with its values set to 0.
+     * @note If both the Tensor instance and its implementation already exist for the gradient,
+     * nothing is done.
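+     *
+     * Usage sketch (illustrative; if the current ``Tensor`` has no implementation,
+     * the gradient falls back to the "cpu" backend):
+     * @code
+     * t.initGradient();   // t's gradient now exists, zero-filled, with t's dims
+     * @endcode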
+     */
+    void initGradient() {
+        if (!mGrad) {
+            mGrad = std::make_shared<Tensor>(mDims);
+        }
+        if (!mGrad->hasImpl()) {
+            mGrad->setDataType(dataType());
+            mGrad->setBackend(hasImpl() ? mImpl->backend() : "cpu");
+            mGrad->zeros();
+        }
+    }
+
     /**
      * @brief From the the 1D contiguous index, return the coordinate of an element in the tensor.
      * Beware: do not use this function with the storage index!
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 0d5359156bb8ff23a5b4bdaea93d30b65f8ba702..b350c5bf0fa2b1af6f102c3a74486c159a7505b4 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -19,6 +19,9 @@
 #include "aidge/utils/Types.h"
 
 Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
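+    // Nothing to copy when a tensor is assigned to itself.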
+    if (this == &other) {
+        return *this;
+    }
     resize(other.dims(), other.strides());
     setDataType(other.dataType(), false); // do not convert existing data
     if (other.hasImpl()) {
@@ -253,7 +256,7 @@ void Aidge::Tensor::copyCast(const Tensor& src) {
     AIDGE_ASSERT(src.isContiguous(), "cannot copy-cast non-contiguous tensor");
 
     // Current Tensor has necessarily a data type, but may not have backend
-    if (!getImpl()) {
+    if (!hasImpl()) {
         // If no backend was set for the current tensor, use the same as src
         const auto deviceSrc = src.getImpl()->device();
         setBackend(deviceSrc.first, deviceSrc.second);
@@ -272,7 +275,7 @@ void Aidge::Tensor::copyFrom(const Tensor& src) {
     AIDGE_ASSERT(src.isContiguous(), "cannot copy from non-contiguous tensor");
 
     // Current Tensor has necessarily a data type, but may not have backend
-    if (!getImpl()) {
+    if (!hasImpl()) {
         // If no backend was set for the current tensor, use the same as src
         const auto deviceSrc = src.getImpl()->device();
         setBackend(deviceSrc.first, deviceSrc.second);