From 9743cb8f97f9b2a8225aa8fd038edb6714eba6b7 Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Mon, 12 Feb 2024 09:37:19 +0000
Subject: [PATCH] Move two functions to Tensor src instead of header

---
 include/aidge/data/Tensor.hpp | 165 +++++-----------------------------
 src/data/Tensor.cpp           | 137 +++++++++++++++++++++++++++-
 2 files changed, 156 insertions(+), 146 deletions(-)

diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 658c0b497..9268b94f1 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -63,7 +63,7 @@ class Tensor : public Data,
      * @brief Construct a new Tensor object from another one (shallow copy).
      * Data memory is not copied, but shared between the new Tensor and the
      * initial one.
-     * 
+     *
      * @param otherTensor
      */
     Tensor(const Tensor&)            = default;
@@ -306,7 +306,7 @@ class Tensor : public Data,
 
     /**
      * @brief Set the Impl object
-     * 
+     *
      * @param impl New impl shared pointer
      * @param implOffset Storage offset in this new impl for this Tensor
      */
@@ -375,7 +375,7 @@ class Tensor : public Data,
      * @param dims New dimensions
      */
     template <std::array<DimSize_t, 1>::size_type DIM> // deducing std::array size_type and declaring DIM accordingly
-    void resize(const std::array<DimSize_t, DIM> &dims) {
+    inline void resize(const std::array<DimSize_t, DIM> &dims) {
         resize(std::vector<DimSize_t>(dims.begin(), dims.end()));
     }
 
@@ -390,48 +390,7 @@ class Tensor : public Data,
      * @param dims New dimensions
      * @param strides Stride of the tensor (if not specified, "nested" stride is used)
      */
-    void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>()) {
-        bool checkContiguous = true;
-        if (strides.empty()) {
-            strides.resize(dims.size());
-            size_t expectedStride = 1;
-            for (int dim = dims.size() - 1; dim >= 0; --dim) {
-                strides[dim] = expectedStride;
-                expectedStride*= dims[dim];
-            }
-            checkContiguous = false;
-        }
-        else {
-            AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
-        }
-
-        if (mImpl.use_count() > 1) {
-            // Here we could also create a new storage for this tensor in this case
-            // But, is it more likely that the user really wants this, or that he did a mistake?
-            AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
-        }
-        else {
-            mDims = dims;
-            mStrides = strides;
-
-            mContiguous = true;
-            if (checkContiguous) {
-                size_t expectedStride = 1;
-                for (int dim = dims.size() - 1; dim >= 0; --dim) {
-                    if (strides[dim] != expectedStride) {
-                        mContiguous = false;
-                        break;
-                    }
-                    expectedStride*= dims[dim];
-                }
-            }
-
-            computeSize();
-            if (mImpl) {
-                mImpl->resize(mSize);
-            }
-        }
-    }
+    void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>());
 
     /**
      * @brief Return if the Tensor object has at leastone element.
@@ -465,95 +424,7 @@ class Tensor : public Data,
         set<expectedType>(getStorageIdx(coordIdx), value);
     }
 
-
-
-    std::string toString() const {
-        AIDGE_ASSERT(mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) || (mImpl->hostPtr() != nullptr)), "tensor should have a valid host pointer");
-
-        // TODO: move lambda elsewhere?
-        auto ptrToString = [](DataType dt, void* ptr, size_t idx) {
-            switch (dt) {
-            case DataType::Float64:
-                return std::to_string(static_cast<double*>(ptr)[idx]);
-            case DataType::Float32:
-                return std::to_string(static_cast<float*>(ptr)[idx]);
-            case DataType::Float16:
-                return std::to_string(static_cast<half_float::half*>(ptr)[idx]);
-            case DataType::Int8:
-                return std::to_string(static_cast<int8_t*>(ptr)[idx]);
-            case DataType::Int16:
-                return std::to_string(static_cast<int16_t*>(ptr)[idx]);
-            case DataType::Int32:
-                return std::to_string(static_cast<int32_t*>(ptr)[idx]);
-            case DataType::Int64:
-                return std::to_string(static_cast<int64_t*>(ptr)[idx]);
-            case DataType::UInt8:
-                return std::to_string(static_cast<uint8_t*>(ptr)[idx]);
-            case DataType::UInt16:
-                return std::to_string(static_cast<uint16_t*>(ptr)[idx]);
-            case DataType::UInt32:
-                return std::to_string(static_cast<uint32_t*>(ptr)[idx]);
-            case DataType::UInt64:
-                return std::to_string(static_cast<uint64_t*>(ptr)[idx]);
-            default:
-                AIDGE_ASSERT(true, "unsupported type to convert to string");
-            }
-            return std::string("?");  // To make Clang happy
-        };
-
-        if (dims().empty()) { return ptrToString(mDataType, mImpl->hostPtr(), 0); }
-        std::string res;
-        std::size_t dim = 0;
-        std::size_t counter = 0;
-        if (nbDims()>=2) {
-            std::vector<std::size_t> dimVals(nbDims(), 0);
-            res += "{\n";
-            while (counter < mSize) {
-                std::string spaceString = std::string((dim+1)<<1,' ');
-                if (dim < nbDims()-2) {
-                    if (dimVals[dim] == 0) {
-                        res += spaceString + "{\n";
-                        ++dim;
-                    } else if (dimVals[dim] < static_cast<std::size_t>(dims()[dim])) {
-                        res += spaceString + "},\n" + spaceString + "{\n";
-                        ++dim;
-                    } else {
-                        res += spaceString + "}\n";
-                        dimVals[dim--] = 0;
-                        dimVals[dim]++;
-                    }
-                } else {
-                    for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) {
-                        res += spaceString + "{";
-                        for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) {
-                            res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + ",";
-                        }
-                        res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + "}";
-                        if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) {
-                            res += ",";
-                        }
-                        res += "\n";
-                    }
-                    if (dim == 0) {
-                        break;
-                    }
-                    dimVals[dim--] = 0;
-                    dimVals[dim]++;
-                }
-            }
-
-            for(int i = static_cast<int>(dim); i > 0; --i) {
-                res += std::string((dim+1)<<1,' ') + "}\n";
-            }
-        } else {
-            res += "{";
-            for (DimSize_t j = 0; j < dims()[0]; ++j) {
-                res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) + ((j < dims()[0]-1) ? "," : " ");
-            }
-        }
-        res += "}";
-        return res;
-    }
+    std::string toString() const;
 
     inline void print() const { printf("%s\n", toString().c_str()); }
 
@@ -621,7 +492,7 @@ class Tensor : public Data,
     }
 
     /**
-     * Returns a sub-tensor with one or more dimension less.
+     * @brief Returns a sub-tensor with one or more dimension less.
      * For instance, t.extract({1}) on a CHW tensor will return the HW tensor
      * of channel #1.
      * Likewise, t.extract({0, 1}) on a NCHW tensor will return the HW tensor
@@ -631,15 +502,15 @@ class Tensor : public Data,
      * tensor is returned.
      * It current tensor was contiguous, the returned tensor is garanteed to be
      * contiguous as well.
-     * 
+     *
      * @param coordIdx Coordinates of the sub-tensor to extract
      * @return Tensor Sub-tensor.
     */
     Tensor extract(const std::vector<std::size_t>& coordIdx) const;
 
     /**
-     * Returns a sub-tensor at some coordinate and with some dimension.
-     * 
+     * @brief Returns a sub-tensor at some coordinate and with some dimension.
+     *
      * @param coordIdx First coordinates of the sub-tensor to extract
      * @param dims Dimensions of the sub-tensor to extract
      * @return Tensor Sub-tensor.
@@ -647,7 +518,7 @@ class Tensor : public Data,
     Tensor extract(const std::vector<std::size_t>& coordIdx, const std::vector<std::size_t>& dims) const;
 
     /**
-     * Make the tensor's storage contiguous, if it is not already the case.
+     * @brief Make the tensor's storage contiguous, if it is not already the case.
      * If not contiguous, a new memory space is allocated.
     */
     void makeContiguous();
@@ -704,7 +575,7 @@ class Tensor : public Data,
      * The data type, backend and device stay the same.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
-     * will occur if fallback has already been allocated with the right 
+     * will occur if fallback has already been allocated with the right
      * type/size/device.
      * @return Reference to either itself or to fallback.
     */
@@ -782,10 +653,10 @@ class Tensor : public Data,
     }
 
     /**
-     * Return a reference to a Tensor on desired data type and backend/device:
+     * @brief Return a reference to a Tensor on desired data type and backend/device:
      * - itself, if already with the right characteristics;
      * - the provided Tensor, overwritten with the right characteristics.
-     * NOTE: no data is copy-casted. If it was so in a previous refCastFrom() on
+     * @note no data is copy-casted. If it was so in a previous refCastFrom() on
      * the same fallback, it remains valid, otherwise, data is invalid.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
@@ -800,11 +671,11 @@ class Tensor : public Data,
     const Tensor& ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device = 0) const;
 
     /**
-     * Return a reference to a Tensor with same characteristics
+     * @brief Return a reference to a Tensor with same characteristics
      * (data type, backend/device) as targetReqs Tensor:
      * - itself, if already with the right characteristics;
      * - the provided Tensor, overwritten with the right characteristics.
-     * NOTE: no data is copy-casted. If it was so in a previous refCastFrom() on
+     * @note no data is copy-casted. If it was so in a previous refCastFrom() on
      * the same fallback, it remains valid, otherwise, data is invalid.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
@@ -819,7 +690,11 @@ class Tensor : public Data,
     }
 
 private:
-    ///\bug not protected against overflow
+    /**
+     * @brief Compute the number of elements in the Tensor.
+     * @note If dimensions are not empty, they are multiplied to get the total number
+     * of elements. Else, the Tensor represents a scalar and contains a single element.
+     */
     void computeSize() {
         mSize = std::accumulate(mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
     }
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index d45dee563..bcbc59883 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -9,10 +9,145 @@
  *
  ********************************************************************************/
 
+#include <vector>
+#include <cstddef>
+
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
+void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
+    bool checkContiguous = true;
+    if (strides.empty()) {
+        strides.resize(dims.size());
+        std::size_t expectedStride = 1;
+        for (std::size_t dim = dims.size(); dim > 0; --dim) { // unsigned-safe reverse loop (avoids int/size_t narrowing)
+            strides[dim - 1] = expectedStride;
+            expectedStride *= dims[dim - 1];
+        }
+        checkContiguous = false; // freshly computed "nested" strides are contiguous by construction
+    }
+    else {
+        AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
+    }
+
+    if (mImpl.use_count() > 1) {
+        // Here we could also create a new storage for this tensor in this case
+        // But, is it more likely that the user really wants this, or that he did a mistake?
+        AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
+    }
+    else {
+        mDims = dims;
+        mStrides = strides;
+
+        mContiguous = true;
+        if (checkContiguous) {
+            std::size_t expectedStride = 1;
+            for (std::size_t i = dims.size(); i > 1; --i) { // check strides[size-1..1]; i>1 avoids size_t wraparound on empty dims
+                if (strides[i - 1] != expectedStride) {
+                    mContiguous = false;
+                    break;
+                }
+                expectedStride *= dims[i - 1];
+            }
+            mContiguous &= dims.empty() || (strides[0] == expectedStride); // guard strides[0] for 0-d (scalar) tensors
+        }
+
+        computeSize();
+        if (mImpl) {
+            mImpl->resize(mSize);
+        }
+    }
+}
+
+std::string Aidge::Tensor::toString() const {
+    AIDGE_ASSERT(mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) || (mImpl->hostPtr() != nullptr)), "tensor should have a valid host pointer");
+
+    // TODO: move lambda elsewhere?
+    auto ptrToString = [](DataType dt, void* ptr, std::size_t idx) {
+        switch (dt) {
+        case DataType::Float64:
+            return std::to_string(static_cast<double*>(ptr)[idx]);
+        case DataType::Float32:
+            return std::to_string(static_cast<float*>(ptr)[idx]);
+        case DataType::Float16:
+            return std::to_string(static_cast<half_float::half*>(ptr)[idx]);
+        case DataType::Int8:
+            return std::to_string(static_cast<int8_t*>(ptr)[idx]);
+        case DataType::Int16:
+            return std::to_string(static_cast<int16_t*>(ptr)[idx]);
+        case DataType::Int32:
+            return std::to_string(static_cast<int32_t*>(ptr)[idx]);
+        case DataType::Int64:
+            return std::to_string(static_cast<int64_t*>(ptr)[idx]);
+        case DataType::UInt8:
+            return std::to_string(static_cast<uint8_t*>(ptr)[idx]);
+        case DataType::UInt16:
+            return std::to_string(static_cast<uint16_t*>(ptr)[idx]);
+        case DataType::UInt32:
+            return std::to_string(static_cast<uint32_t*>(ptr)[idx]);
+        case DataType::UInt64:
+            return std::to_string(static_cast<uint64_t*>(ptr)[idx]);
+        default:
+            AIDGE_ASSERT(false, "unsupported type to convert to string"); // must be false so the assert actually fires
+        }
+        return std::string("?");  // To make Clang happy
+    };
+
+    if (dims().empty()) { return ptrToString(mDataType, mImpl->hostPtr(), 0); }
+    std::string res;
+    std::size_t dim = 0;
+    std::size_t counter = 0;
+    if (nbDims()>=2) {
+        std::vector<std::size_t> dimVals(nbDims(), 0);
+        res += "{\n";
+        while (counter < mSize) {
+            std::string spaceString = std::string((dim+1)<<1,' ');
+            if (dim < nbDims()-2) {
+                if (dimVals[dim] == 0) {
+                    res += spaceString + "{\n";
+                    ++dim;
+                } else if (dimVals[dim] < static_cast<std::size_t>(dims()[dim])) {
+                    res += spaceString + "},\n" + spaceString + "{\n";
+                    ++dim;
+                } else {
+                    res += spaceString + "}\n";
+                    dimVals[dim--] = 0;
+                    dimVals[dim]++;
+                }
+            } else {
+                for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) {
+                    res += spaceString + "{";
+                    for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) {
+                        res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + ",";
+                    }
+                    res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + "}";
+                    if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) {
+                        res += ",";
+                    }
+                    res += "\n";
+                }
+                if (dim == 0) {
+                    break;
+                }
+                dimVals[dim--] = 0;
+                dimVals[dim]++;
+            }
+        }
+
+        for(int i = static_cast<int>(dim); i > 0; --i) {
+            res += std::string((dim+1)<<1,' ') + "}\n";
+        }
+    } else {
+        res += "{";
+        for (DimSize_t j = 0; j < dims()[0]; ++j) {
+            res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) + ((j < dims()[0]-1) ? "," : " ");
+        }
+    }
+    res += "}";
+    return res;
+}
+
 Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
     AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
@@ -52,7 +187,7 @@ void Aidge::Tensor::makeContiguous() {
 
             // Determine the size of the contiguous chunk
             size_t copySize = 1;
-            while (idx + copySize < mSize && 
+            while (idx + copySize < mSize &&
                 getStorageIdx(getCoord(idx + copySize)) == storageIdx + copySize)
             {
                 ++copySize;
-- 
GitLab