diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 8be15d1d8259ce606e410eb16bcd189ec0ff7b0e..d66f12dd02678d091b01d4fd41e50d8cb58ed17d 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -36,8 +36,8 @@ public:
     /// @param length Nb of element to copy from the buffer
     virtual void copy(Byte_t const *const src, NbElts_t length) = 0;
     /// @deprecated see API V2
-    /// @todo This function returns the address of a data allocated on the spot if it does
-    /// not exists, which is undocumented. Is it a valid design. reconsider.
+    /// @todo This function returns the address of data allocated on the spot if it
+    /// does not exist, which is undocumented. Is this a valid design? Reconsider.
     virtual Byte_t *rawPtr() = 0;
     /// @brief Size of one scalar (in bytes)
     inline std::size_t scalarSize() const noexcept
@@ -82,7 +82,8 @@ public:
     /// @brief Returns a vector of the tensor in-memory dimensions.
     /// @return Reference to a const vector of D sizes expressed in number of elements
     /// along a dimension.<br>
-    /// D is the number of dimensions of the tensor (thus equals getDimensions().size())
+    /// D is the number of dimensions of the tensor (thus equals
+    /// getDimensions().size())
     inline std::vector<DimSize_t> const &getDimensions() const noexcept
     {
         return mvDimensions;
@@ -147,8 +148,8 @@ public:
         std::size_t In = flatIdx;
         // trick for reverse loop without index underflow
         // when i is 1, (i--)>0 is true but i contains 0 on ouput
-        // when i is 0,  (i--)>0 is false leaving the loop, i has underflow but its value
-        // is not used anymore
+        // when i is 0, (i--)>0 is false, leaving the loop; i has underflowed but
+        // its value is not used anymore
         /// @todo implement it with synchronized reverse iterators
         for (std::size_t i = mvDimensions.size(); (i--) > 0;)
         {
@@ -160,7 +161,8 @@ public:
         }
     };
 
-    /// @brief Get the linear index of the first Byte_t of the data at given coordinates
+    /// @brief Get the linear index of the first Byte_t of the data at given
+    /// coordinates
     /// @param coordIdx coordinates of the desired data
     /// @note The index is expressed in number of elements
     /// @return Linear index of the first Byte_t of the data at given coordinates
@@ -172,7 +174,8 @@ public:
             assert(
                 ((coordIdx[i] - mvFirstDataCoordinates[i]) < mvDimensions[i])
                 && (coordIdx[i] >= mvFirstDataCoordinates[i])
-                && "Coordinates dimensions does not fit the dimensions of the tensor");
+                && "Coordinates dimensions does not fit the dimensions of the "
+                   "tensor");
             flatIdx += (coordIdx[i] * mvLayout[i]);
         }
         return flatIdx / mScalarSize;
@@ -181,8 +184,8 @@ public:
     /// @brief Change TensorImpl dimensions
     /// @note Current API does not change first data coordinates
     /// @note Preexisting data are lost whatever dims value
-    /// @note If new dimensions are not large enough to hold the Tensors that referenced
-    /// this implementation is undefined behavior
+    /// @note If the new dimensions are not large enough to hold the Tensors that
+    /// reference this implementation, the behavior is undefined
     /// @param dims New dimensions
     /// @todo first data coordinates should be redefined also
     virtual void resize(const std::vector<DimSize_t> &dims) = 0;
@@ -193,8 +196,8 @@ private:
     /// @brief Tensor whose the TensorImpl manage the memory
     /// @deprecated Future version may be referenced by several Tensors
     /// @todo edesign must be
-    /// considered so that a TensorImpl either does not know of its user or it has a list
-    /// of all of them
+    /// considered so that a TensorImpl either does not know of its user or it has a
+    /// list of all of them
     Tensor const &mTensor;
     /// @brief Number of stored data
     /// @details mNbElts == prod(mDimensions)/mScalarSize
@@ -205,8 +208,8 @@ private:
     /// @todo Implement overflow protection as mNbElts*mScalarSize must fit into a
     /// std::size_t
     std::size_t mScalarSize = 0;
-    /// @brief Actual in-memory tensor dimensions expressed as number of elements along
-    /// each dimension
+    /// @brief Actual in-memory tensor dimensions expressed as number of elements
+    /// along each dimension
     std::vector<DimSize_t> mvDimensions;
     /// @brief memory tensor layout
     /// @details Let ptr be the memory address of a data of logical coordinates
@@ -223,7 +226,8 @@ private:
     /// the actual data pointer type, all the data can be traversed by simple
     /// pointer arithmetic.
     /// @deprecated Such storage is meaningless for most backend except CPU.
-    /// @todo If needed, replace by a _any storing a backend-dependant handle on storage.
+    /// @todo If needed, replace by a _any storing a backend-dependent handle on
+    /// storage.
     Byte_t *mStorage = nullptr;
     /// @brief Logical coordinates of the data stored at mStorage.
     std::vector<Coord_t> mvFirstDataCoordinates;
@@ -305,8 +309,9 @@ protected:
 
         // supposedly more efficient reverse_iterator based solution but much less
         // readable auto pLayout = std::rbegin(mvLayout); auto pFinish =
-        // std::rend(mvLayout); auto pDimension = std::rbegin(mvDimensions); *pLayout =
-        // mScalarSize; auto pPrevLayout = pLayout; for (++pLayout; pLayout != pFinish;
+        // std::rend(mvLayout); auto pDimension = std::rbegin(mvDimensions); *pLayout
+        // = mScalarSize; auto pPrevLayout = pLayout; for (++pLayout; pLayout !=
+        // pFinish;
         // ++pLayout, ++pDimension, ++pPrevLayout)
         // {
         //     *pLayout = (*pPrevLayout) * (*pDimension);
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 13cd605290a2f83e309b93e0686af3493ae201d0..de02654d863a28fe933aff9fb194a01321861db5 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -112,10 +112,9 @@ public:
     /**
      * @brief Construct a new Tensor object copied from another one.
      * @param otherTensor Tensor to copy
-     * @details Copy only the active area, without existing context and using the same
-     * backend as the input tensor.
+     * @details Performs a deep copy of all the data in the source tensor, using
+     * the same backend.
      */
-
     Tensor(const Tensor &otherTensor);
 
     /**