diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 5f6be6045167f6ff523876aaa309a536683810de..fa8b5360dcb42b870bcce25351c416f95d7a2818 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -24,10 +24,7 @@
 
 #include "aidge/backend/TensorImpl.hpp"
 #include "aidge/data/Data.hpp"
-#include "aidge/operator/Add.hpp"
-#include "aidge/operator/Div.hpp"
-#include "aidge/operator/Mul.hpp"
-#include "aidge/operator/Sub.hpp"
+
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ArrayHelpers.hpp"
@@ -242,6 +239,13 @@ class Tensor : public Data,
         return *mImpl == *(otherTensor.mImpl);
     }
 
+    /**
+     * @brief Transpose the Tensor: return a copy with its dimensions
+     * permuted according to the given order.
+     *
+     * @param outputDimsOrder Permutation of the dimension indices.
+     * @return Tensor Transposed copy of this Tensor.
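+     *
+     * Minimal usage sketch (assuming an initialized 2-D ``Tensor`` `t` of
+     * dims {2, 3} on a registered "cpu" backend; names are illustrative):
+     * @code
+     * Tensor tT = t.transpose({1, 0}); // dims become {3, 2}
+     * @endcode
+     */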
+    Tensor transpose(const std::vector<DimSize_t> &outputDimsOrder) const;
+
     /**
      * @brief Element-wise addition operation for two ``Tensor``s.
      * @note ``Tensor``s should be stored on the same backend.
@@ -251,21 +255,7 @@ class Tensor : public Data,
      * @param other
      * @return Tensor
      */
-    Tensor operator+(const Tensor& other) const {
-        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
-        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
-        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
-        AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
-        auto add_ = Add_Op(2);
-        add_.associateInput(0, std::make_shared<Tensor>(*this));
-        add_.associateInput(1, std::make_shared<Tensor>(other));
-        add_.setDataType(dataType());
-        add_.setDataFormat(dataFormat());
-        add_.setBackend(mImpl->backend());
-        add_.forward();
-        // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-        return add_.getOutput(0)->clone();
-    }
+    Tensor operator+(const Tensor& other) const;
 
     /**
      * @brief Element-wise subtraction operation for two ``Tensor``s.
@@ -276,21 +266,7 @@ class Tensor : public Data,
      * @param other
      * @return Tensor
      */
-    Tensor operator-(const Tensor& other) const {
-        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
-        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
-        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
-        AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
-        auto sub_ = Sub_Op();
-        sub_.associateInput(0, std::make_shared<Tensor>(*this));
-        sub_.associateInput(1, std::make_shared<Tensor>(other));
-        sub_.setDataType(dataType());
-        sub_.setDataFormat(dataFormat());
-        sub_.setBackend(mImpl->backend());
-        sub_.forward();
-        // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-        return sub_.getOutput(0)->clone();
-    }
+    Tensor operator-(const Tensor& other) const;
 
     /**
      * @brief Element-wise multiplication operation for two ``Tensor``s.
@@ -301,21 +277,7 @@ class Tensor : public Data,
      * @param other
      * @return Tensor
      */
-    Tensor operator*(const Tensor& other) const {
-        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
-        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
-        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
-        AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
-        auto mul_ = Mul_Op();
-        mul_.associateInput(0, std::make_shared<Tensor>(*this));
-        mul_.associateInput(1, std::make_shared<Tensor>(other));
-        mul_.setDataType(dataType());
-        mul_.setDataFormat(dataFormat());
-        mul_.setBackend(mImpl->backend());
-        mul_.forward();
-        // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-        return mul_.getOutput(0)->clone();
-    }
+    Tensor operator*(const Tensor& other) const;
 
     /**
      * @brief Element-wise division operation for two ``Tensor``s.
@@ -326,21 +288,8 @@ class Tensor : public Data,
      * @param other
      * @return Tensor
      */
-    Tensor operator/(const Tensor& other) const {
-        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
-        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
-        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
-        AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
-        auto div_ = Div_Op();
-        div_.associateInput(0, std::make_shared<Tensor>(*this));
-        div_.associateInput(1, std::make_shared<Tensor>(other));
-        div_.setDataType(dataType());
-        div_.setDataFormat(dataFormat());
-        div_.setBackend(mImpl->backend());
-        div_.forward();
-        // using add_backend = std::remove_reference_t<decltype(*Registrar<Add_Op>::create("cpu")(std::declval<const Add_Op&>()))>;
-        return div_.getOutput(0)->clone();
-    }
+    Tensor operator/(const Tensor& other) const;
 
     ~Tensor() noexcept;
 
@@ -432,7 +381,7 @@ public:
     /**
      * @brief Set the DataFormat of the Tensor and transpose data, only
      * if the Tensor has already been initialized and copyTrans is true.
-     * In this case, a transposition occurs only if both previous format and 
+     * In this case, a transposition occurs only if both previous format and
      * new format are different from DataFormat::Default.
      * @param df New DataFormat
      * @param copyTrans If true (default), when both previous format and new
@@ -512,6 +461,18 @@ public:
      */
     constexpr std::size_t size() const noexcept { return mSize; }
 
+    /**
+     * @brief Return the current capacity of the tensor, i.e. the actual
+     * amount of memory currently allocated. It can differ from the size:
+     * - Capacity can be 0 if the tensor memory has not been initialized yet
+     *   (because of lazy initialization, memory is allocated only when it is
+     *   accessed for the first time).
+     * - Capacity can be > size if the tensor was downsized but its memory was
+     *   not reallocated.
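+     *
+     * Sketch of the distinction (assuming a dims constructor and a
+     * registered "cpu" backend; values are illustrative):
+     * @code
+     * Tensor t(std::vector<std::size_t>{4, 4});
+     * t.setBackend("cpu");      // lazy: capacity() may still be 0 here
+     * // after the first data access: capacity() == 16
+     * // after t.resize({2, 4}): size() == 8, capacity() may remain 16
+     * @endcode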
+     */
+    inline std::size_t capacity() const noexcept { return mImpl ? mImpl->capacity() : 0; }
+
     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
      * If the overall size is not changed (meaning we actually only performed a
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index fac7ff0f6cad4c80700bf40266b0457768e8511b..780258ea748edd25d228cfd9e212f318e59a492e 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -16,8 +16,119 @@
 
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Div.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/Sub.hpp"
+#include "aidge/operator/Transpose.hpp"
 #include "aidge/utils/Types.h"
 
+/**
+ * @brief Transpose the Tensor: return a copy with its dimensions permuted
+ * according to the given order.
+ *
+ * @param outputDimsOrder Permutation of the dimension indices.
+ * @return Tensor Transposed copy of this Tensor.
+ */
+Aidge::Tensor Aidge::Tensor::transpose(const std::vector<Aidge::DimSize_t> &outputDimsOrder) const {
+    AIDGE_ASSERT(hasImpl(), "Tensor cannot be transposed because it has no implementation.");
+    // Run a temporary Transpose operator on a copy of this Tensor and return
+    // a clone of its output.
+    auto transpose_ = Aidge::Transpose_Op(outputDimsOrder);
+    transpose_.associateInput(0, std::make_shared<Tensor>(*this));
+    transpose_.setDataType(dataType());
+    transpose_.setBackend(mImpl->backend());
+    transpose_.forward();
+    return transpose_.getOutput(0)->clone();
+}
+
+/**
+ * @brief Element-wise addition operation for two ``Tensor``s.
+ * @note ``Tensor``s should be stored on the same backend.
+ * @todo If input ``Tensor``s have a different dataType, the output should
+ * have the dataType of the ``Tensor`` with the highest precision.
+ *
+ * @param other
+ * @return Tensor
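+ *
+ * Minimal usage sketch (assuming two initialized ``Tensor``s `a` and `b`
+ * with identical dims, data type and format on a registered "cpu" backend):
+ * @code
+ * Tensor sum = a + b; // element-wise a[i] + b[i]
+ * @endcode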
+ */
+Aidge::Tensor Aidge::Tensor::operator+(const Aidge::Tensor& other) const {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
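+    // Run a temporary two-input Add operator on copies of both operands and
+    // return a clone of its output.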
+    auto add_ = Add_Op(2);
+    add_.associateInput(0, std::make_shared<Tensor>(*this));
+    add_.associateInput(1, std::make_shared<Tensor>(other));
+    add_.setDataType(dataType());
+    add_.setDataFormat(dataFormat());
+    add_.setBackend(mImpl->backend());
+    add_.forward();
+    return add_.getOutput(0)->clone();
+}
+
+/**
+ * @brief Element-wise subtraction operation for two ``Tensor``s.
+ * @note ``Tensor``s should be stored on the same backend.
+ * @todo If input ``Tensor``s have a different dataType, the output should
+ * have the dataType of the ``Tensor`` with the highest precision.
+ *
+ * @param other
+ * @return Tensor
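+ *
+ * Usage sketch (same preconditions as ``operator+``):
+ * @code
+ * Tensor diff = a - b; // element-wise a[i] - b[i]
+ * @endcode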
+ */
+Aidge::Tensor Aidge::Tensor::operator-(const Aidge::Tensor& other) const {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto sub_ = Sub_Op();
+    sub_.associateInput(0, std::make_shared<Tensor>(*this));
+    sub_.associateInput(1, std::make_shared<Tensor>(other));
+    sub_.setDataType(dataType());
+    sub_.setDataFormat(dataFormat());
+    sub_.setBackend(mImpl->backend());
+    sub_.forward();
+    return sub_.getOutput(0)->clone();
+}
+
+/**
+ * @brief Element-wise multiplication operation for two ``Tensor``s.
+ * @note ``Tensor``s should be stored on the same backend.
+ * @todo If input ``Tensor``s have a different dataType, the output should
+ * have the dataType of the ``Tensor`` with the highest precision.
+ *
+ * @param other
+ * @return Tensor
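+ *
+ * Usage sketch (same preconditions as ``operator+``):
+ * @code
+ * Tensor prod = a * b; // element-wise a[i] * b[i]
+ * @endcode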
+ */
+Aidge::Tensor Aidge::Tensor::operator*(const Aidge::Tensor& other) const {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto mul_ = Mul_Op();
+    mul_.associateInput(0, std::make_shared<Tensor>(*this));
+    mul_.associateInput(1, std::make_shared<Tensor>(other));
+    mul_.setDataType(dataType());
+    mul_.setDataFormat(dataFormat());
+    mul_.setBackend(mImpl->backend());
+    mul_.forward();
+    return mul_.getOutput(0)->clone();
+}
+
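+/**
+ * @brief Element-wise division operation for two ``Tensor``s.
+ * @note ``Tensor``s should be stored on the same backend.
+ * @todo If input ``Tensor``s have a different dataType, the output should
+ * have the dataType of the ``Tensor`` with the highest precision.
+ *
+ * @param other
+ * @return Tensor
+ *
+ * Usage sketch (same preconditions as ``operator+``):
+ * @code
+ * Tensor quot = a / b; // element-wise a[i] / b[i]
+ * @endcode
+ */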
+Aidge::Tensor Aidge::Tensor::operator/(const Aidge::Tensor& other) const {
+    AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+    AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+    AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+    AIDGE_ASSERT(dataFormat() == other.dataFormat(), "Tensors must have the same data format");
+    auto div_ = Div_Op();
+    div_.associateInput(0, std::make_shared<Tensor>(*this));
+    div_.associateInput(1, std::make_shared<Tensor>(other));
+    div_.setDataType(dataType());
+    div_.setDataFormat(dataFormat());
+    div_.setBackend(mImpl->backend());
+    div_.forward();
+    return div_.getOutput(0)->clone();
+}
+
 Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
     if (this == &other) {
         return *this;