From 0167d1dcf8c4819aa304a796f7e9d7b22f0e1303 Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Thu, 29 Feb 2024 18:03:09 +0000
Subject: [PATCH] Add tests for nearly every member function of Tensor (except
 makeContiguous(), toString() and empty())

---
 unit_tests/data/Test_Tensor.cpp | 439 +++++++++++++++++++++++++++++++-
 1 file changed, 432 insertions(+), 7 deletions(-)

diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp
index 1104cda98..f26901dc3 100644
--- a/unit_tests/data/Test_Tensor.cpp
+++ b/unit_tests/data/Test_Tensor.cpp
@@ -10,23 +10,448 @@
  ********************************************************************************/
 
 #include <array>
-#include <cstddef>
-#include <cstdint>  //std::uint16_t
-#include <random>
+#include <cstddef>     // std::size_t
+#include <cstdint>     // std::uint8_t, std::uint16_t, std::int32_t
+#include <functional>  // std::multiplies
+#include <memory>      // std::shared_ptr, std::make_shared
+#include <numeric>     // std::accumulate, std::inner_product
+#include <random>      // std::random_device, std::mt19937,
+                       // std::uniform_int_distribution, std::uniform_real_distribution
+#include <set>
+#include <string>
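+#include <utility>     // std::move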
 #include <vector>
 
 #include <catch2/catch_test_macros.hpp>
 
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
+#include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
-#include "aidge/utils/TensorUtils.hpp"
 #include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+#include "aidge/utils/Types.h"
 
 namespace Aidge {
 
-TEST_CASE("[backend/cpu/data] Tensor", "[Tensor]") {
-    SECTION("Constructor") {
+TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
+    SECTION("Default constructor") {
+        Tensor T_default{};
+        REQUIRE((
+            (T_default.dataType() == DataType::Float32) &&
+            (T_default.size() == 1) &&
+            (T_default.dims() == std::vector<DimSize_t>({})) &&
+            (T_default.strides() == std::vector<DimSize_t>({1})) &&
+            (T_default.getImpl() == nullptr) &&
+            (T_default.grad() == nullptr) &&
+            (T_default.isContiguous() == true)
+        ));
+    }
+    SECTION("scalar constructor") {
+        Tensor T;
+        REQUIRE_NOTHROW(T = Tensor(std::int32_t(20)));
+        REQUIRE((
+            (T.dataType() == DataType::Int32) &&
+            (T.size() == 1) &&
+            (T.dims() == std::vector<DimSize_t>({})) &&
+            (T.strides() == std::vector<DimSize_t>({1})) &&
+            (T.getImpl() != nullptr) &&
+            (T.grad() == nullptr) &&
+            (T.isContiguous() == true)
+        ));
+    }
+    SECTION("dim constructor") {
+        const std::vector<DimSize_t> Tdims = {1,2,3,4,5,6,7};
+        Tensor T;
+        REQUIRE_NOTHROW(T = Tensor(Tdims));
+        REQUIRE((
+            (T.dataType() == DataType::Float32) &&
+            (T.size() == std::accumulate(Tdims.cbegin(), Tdims.cend(), DimSize_t(1), std::multiplies<DimSize_t>())) &&
+            (T.dims() == Tdims) &&
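+            // row-major strides: stride[i] = prod(dims[i+1:]), e.g. 5040 = 2*3*4*5*6*7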
+            (T.strides() == std::vector<DimSize_t>({5040,2520,840,210,42,7,1})) &&
+            (T.getImpl() == nullptr) &&
+            (T.grad() == nullptr) &&
+            (T.isContiguous() == true)
+        ));
+    }
+    SECTION("TensorUtils, constructor from const arrays") {
+        Tensor T;
+        // Construction from different types and sizes
+
+        // Set an already constructed Tensor
+        REQUIRE_NOTHROW(T = Array1D<int, 2>{{1, 2}});
+        REQUIRE((
+            (T.dataType() == DataType::Int32) &&
+            (T.size() == 2) &&
+            (T.dims() == std::vector<DimSize_t>({2})) &&
+            (T.strides() == std::vector<DimSize_t>({1})) &&
+            (T.getImpl() != nullptr) &&
+            (T.grad() == nullptr) &&
+            (T.isContiguous() == true)
+        ));
+
+        // Change dims
+        REQUIRE_NOTHROW(T = Array2D<int, 2, 2>{{{1, 2}, {3, 4}}});
+        // Change data types
+        REQUIRE_NOTHROW(T = Array3D<std::uint8_t, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
+        REQUIRE((
+            (T.dataType() == DataType::UInt8) &&
+            (T.size() == 8) &&
+            (T.dims() == std::vector<DimSize_t>({2,2,2})) &&
+            (T.strides() == std::vector<DimSize_t>({4,2,1})) &&
+            (T.getImpl() != nullptr) &&
+            (T.grad() == nullptr) &&
+            (T.isContiguous() == true)
+        ));
+        REQUIRE_NOTHROW(T = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
+        REQUIRE_NOTHROW(T = Array3D<float, 2, 2, 2>{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}});
+        REQUIRE_NOTHROW(T = Array3D<double, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}});
+
+        // Change dims
+        REQUIRE_NOTHROW(T = Array4D<int, 2, 2, 2, 2>{{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},
+                                                    {{{9,10}, {11,12}}, {{13,14},{15,16}}}}});
+        REQUIRE((
+            (T.dataType() == DataType::Int32) &&
+            (T.size() == 16) &&
+            (T.dims() == std::vector<DimSize_t>({2,2,2,2})) &&
+            (T.strides() == std::vector<DimSize_t>({8,4,2,1})) &&
+            (T.getImpl() != nullptr) &&
+            (T.grad() == nullptr) &&
+            (T.isContiguous() == true)
+        ));
+    }
+    SECTION("copy constructor / copy assignment operator") {
+
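+        // Minimal sketch: copy construction and copy assignment should
+        // preserve the observable state of the source Tensor. Whether the
+        // underlying implementation is shared or deep-copied is deliberately
+        // not asserted here, as it is an implementation detail.
+        Tensor Tsrc = Array2D<int, 2, 2>{{{1, 2}, {3, 4}}};
+        Tensor Tcopy(Tsrc); // copy constructor
+        REQUIRE((
+            (Tcopy.dataType() == Tsrc.dataType()) &&
+            (Tcopy.size() == Tsrc.size()) &&
+            (Tcopy.dims() == Tsrc.dims()) &&
+            (Tcopy == Tsrc)
+        ));
+        Tensor Tassign;
+        Tassign = Tsrc; // copy assignment operator
+        REQUIRE(Tassign == Tsrc);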
+    }
+    SECTION("move constructor / move assignment operator") {
+
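+        // Minimal sketch: after a move, the destination Tensor should expose
+        // the source's former state. The moved-from Tensor is only required
+        // to be in a valid but unspecified state, so it is not inspected.
+        Tensor Tsrc = Array1D<int, 4>{{1, 2, 3, 4}};
+        const std::vector<DimSize_t> srcDims = Tsrc.dims();
+        Tensor Tmoved(std::move(Tsrc)); // move constructor
+        REQUIRE((
+            (Tmoved.dataType() == DataType::Int32) &&
+            (Tmoved.size() == 4) &&
+            (Tmoved.dims() == srcDims) &&
+            (Tmoved.getImpl() != nullptr)
+        ));
+        Tensor Tmovassign;
+        Tmovassign = std::move(Tmoved); // move assignment operator
+        REQUIRE((
+            (Tmovassign.size() == 4) &&
+            (Tmovassign.dims() == srcDims)
+        ));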
+    }
+    SECTION("prototype") {
+        constexpr std::uint16_t NBTRIALS = 10;
+
+        // Create random number generators
+        std::random_device rd;
+        std::mt19937 gen(rd());
+        std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+        std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+        std::uniform_real_distribution<float> valueDist(0.001f, 1.0f);
+
+        for (std::size_t trial = 0; trial < NBTRIALS; ++trial) {
+            std::vector<std::size_t> Tdims;
+            const std::size_t nbDims = nbDimsDist(gen);
+            for (std::size_t i = 0; i < nbDims; ++i) {
+                Tdims.push_back(dimsDist(gen));
+            }
+            Tensor T(Tdims);
+
+            // fill the tensor with random values
+            float* array0 = new float[T.size()];
+            for (std::size_t i = 0; i < T.size(); ++i) {
+                array0[i] = valueDist(gen);
+            }
+            T.setBackend("cpu");
+            T.getImpl()->setRawPtr(array0, T.size());
+
+            Tensor Tclone;
+            REQUIRE_NOTHROW(Tclone = T.clone());
+            REQUIRE((
+                (T.dataType() == Tclone.dataType()) &&
+                (T.size() == Tclone.size()) &&
+                (T.dims() == Tclone.dims()) &&
+                (T.strides() == Tclone.strides()) &&
+                (T.getImpl() != Tclone.getImpl()) &&
+                (Tclone.grad() == nullptr) &&
+                (Tclone.isContiguous() == true)
+            ));
+            REQUIRE(Tclone == T);
+            delete[] array0; // the implementation does not own the buffer
+        }
+    }
+}
+
+TEST_CASE("[core/data] Tensor(getter/setter)", "[Tensor][Getter][Setter]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create random number generators
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+    std::uniform_real_distribution<float> valueDist(0.001f, 1.0f);
+
+    for (std::size_t trial = 0; trial < NBTRIALS; ++trial) {
+        std::vector<std::size_t> Tdims;
+        const std::size_t nbDims = nbDimsDist(gen);
+        for (std::size_t i = 0; i < nbDims; ++i) {
+            Tdims.push_back(dimsDist(gen));
+        }
+
+        // create Tensor
+        Tensor T(Tdims);
+        // compute expected row-major strides: stride[i] = stride[i+1] * dims[i+1], innermost stride = 1
+        std::vector<std::size_t> Tstrides(Tdims.size(), 1);
+        std::size_t i = Tdims.size() - 1;
+        while (i-- > 0) {
+            Tstrides[i] = Tstrides[i+1]*Tdims[i+1];
+        }
+
+    /////////////////
+    // dimensions
+        // nbDims(), dims(), size()
+        REQUIRE(T.nbDims() == Tdims.size());
+
+        REQUIRE(T.dims() == Tdims);
+
+        std::size_t trueSize = std::accumulate(Tdims.cbegin(), Tdims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+        REQUIRE(T.size() == trueSize);
+
+    /////////////////
+    // implementation
+        // getImpl(), setImpl(), hasImpl()
+        REQUIRE(T.hasImpl() == false);
+        std::shared_ptr<TensorImpl_cpu<float>> tensorImpl = std::make_shared<TensorImpl_cpu<float>>(0, Tdims);
+
+        T.setImpl(tensorImpl);
+        REQUIRE(T.getImpl() == tensorImpl);
+        REQUIRE(T.hasImpl() == true);
+
+        // isContiguous(), strides()
+        REQUIRE(T.isContiguous());
+        REQUIRE(T.strides() == Tstrides);
+
+        // fill the tensor with random values
+        float* array0 = new float[T.size()];
+        for (std::size_t i = 0; i < T.size(); ++i) {
+            array0[i] = valueDist(gen);
+        }
+        tensorImpl->setRawPtr(array0, T.size());
+
+        // getCoord(), getIdx(), getStorageIdx()
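+        // pick a valid coordinate in each dimension (clamped to at most 2)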
+        std::vector<DimSize_t> Tdims_copy = Tdims;
+        for (auto& val : Tdims_copy) {
+            val = std::min(DimSize_t(2), std::max(DimSize_t(0), val - 1));
+        }
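+        // expected flat index: inner product of the coordinate and the strides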
+        DimSize_t true_flatid = std::inner_product(Tdims_copy.cbegin(), Tdims_copy.cend(), Tstrides.cbegin(), DimSize_t(0));
+
+        REQUIRE(T.getCoord(true_flatid) == Tdims_copy);
+        REQUIRE(T.getIdx(Tdims_copy) == true_flatid);
+        REQUIRE(T.getStorageIdx(Tdims_copy) == true_flatid); // Tensor is not a view
+
+        // set(vector), set(size_t), get(vector), get(size_t), getImplOffset()
+        REQUIRE_NOTHROW(T.set<float>(Tdims_copy, 50.0f));
+        REQUIRE(T.get<float>(Tdims_copy) == 50.0f);
+
+        REQUIRE_NOTHROW(T.set<float>(true_flatid, 40.0f));
+        REQUIRE(T.get<float>(true_flatid) == 40.0f);
+        REQUIRE(T.getImplOffset() == 0);
+
+
+    //////////////
+    // backend
+        // getAvailableBackends()
+        REQUIRE(Tensor::getAvailableBackends() == std::set<std::string>({"cpu"}));
+
+        // setBackend()
+        REQUIRE_NOTHROW(T.setBackend("cpu", 0));
+
+        // setDataType(), dataType()
+        REQUIRE_NOTHROW(T.setDataType(DataType::Int16));
+        REQUIRE(T.dataType() == DataType::Int16);
+
+        // free the buffer last: setDataType() may still read the data
+        // previously set with setRawPtr()
+        delete[] array0;
+    }
+}
+TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") {
+    // zeros(), extract() and print() are tested here;
+    // makeContiguous(), toString() and empty() are not tested yet
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create random number generators
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+    std::uniform_real_distribution<float> valueDist(0.001f, 1.0f);
+    std::uniform_int_distribution<int> intValueDist(1, 100); // integer values for the Int32 extract test
+    // zeros, resize
+    SECTION("zeros") {
+        Tensor T;
+        for (std::size_t trial = 0; trial < NBTRIALS; ++trial) {
+            std::vector<std::size_t> Tdims;
+            const std::size_t Tsize = nbDimsDist(gen);
+            for (std::size_t i = 0; i < Tsize; ++i) {
+                Tdims.push_back(dimsDist(gen));
+            }
+            T.resize(Tdims);
+
+            // fill the tensor with random values
+            float* array0 = new float[T.size()];
+            for (std::size_t i = 0; i < T.size(); ++i) {
+                array0[i] = valueDist(gen);
+            }
+            T.setBackend("cpu");
+            T.getImpl()->setRawPtr(array0, T.size());
+            float* res = static_cast<float*>(T.getImpl()->hostPtr());
+            for (std::size_t i = 0; i < T.size(); ++i) {
+                REQUIRE(res[i] == array0[i]);
+            }
+
+            T.zeros();
+            res = static_cast<float*>(T.getImpl()->hostPtr());
+            for (std::size_t i = 0; i < T.size(); ++i) {
+                REQUIRE(res[i] == 0.0f);
+            }
+            delete[] array0;
+        }
+    }
+
+    SECTION("Tensor extract") {
+        bool equal;
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // create Tensor
+            // each dimension >= 2, so that (dim - 2) is always a valid coordinate
+            const std::size_t dim0 = dimsDist(gen) + 1;
+            const std::size_t dim1 = dimsDist(gen) + 1;
+            const std::size_t dim2 = dimsDist(gen) + 1;
+            const std::vector<std::size_t> dims = {dim0, dim1, dim2};
+            // a VLA is not standard C++: use std::vector as the raw buffer
+            std::vector<int> array0(dim0*dim1*dim2);
+            for (std::size_t i = 0; i < dim0; ++i) {
+                for (std::size_t j = 0; j < dim1; ++j) {
+                    for (std::size_t k = 0; k < dim2; ++k) {
+                        array0[((i * dim1) + j)*dim2 + k] = intValueDist(gen); // valueDist would truncate to 0 here
+                    }
+                }
+            }
+            Tensor x{dims};
+            x.setDataType(DataType::Int32);
+            x.setBackend("cpu");
+            Tensor y;
+            Tensor y0;
+            Tensor y1;
+            Tensor y2;
+            Tensor y3;
+            x.getImpl()->setRawPtr(array0.data(), dim0*dim1*dim2);
+            REQUIRE(x.isContiguous());
+
+        ////////////////
+        // extract contiguous Tensor slice given start coordinates
+            // the whole Tensor
+            REQUIRE_NOTHROW(y0 = x.extract({}));
+            REQUIRE(y0 == x);
+            int* y0_res = static_cast<int*>(y0.getImpl()->hostPtr());
+            equal = true;
+            for (std::size_t i = 0; i < dim0*dim1*dim2; ++i) {
+                equal &= (y0_res[i] == array0[i]);
+            }
+            REQUIRE(equal);
+            REQUIRE(y0.getImpl() == x.getImpl());
+            REQUIRE(y0.isContiguous());
+
+            // Tensor - 1-D
+            REQUIRE_NOTHROW(y1 = x.extract({dim0 - 2}));
+            int* y1_res = static_cast<int*>(y1.getImpl()->hostPtr());
+            equal = true;
+            for (std::size_t i = 0; i < dim1*dim2; ++i) {
+                equal &= (y1_res[i] == array0[(dim0-2)*dim1*dim2 + i]);
+            }
+            REQUIRE(equal);
+            REQUIRE(y1.getImpl() == x.getImpl());
+            REQUIRE(y1.isContiguous());
+
+            // Tensor - 2-D
+            REQUIRE_NOTHROW(y2 = x.extract({dim0 - 2, dim1 - 2}));
+            int* y2_res = static_cast<int*>(y2.getImpl()->hostPtr());
+            equal = true;
+            for (std::size_t i = 0; i < dim2; ++i) {
+                equal &= (y2_res[i] == array0[(((dim0 - 2) * dim1) + (dim1 - 2))*dim2 + i]);
+            }
+            REQUIRE(equal);
+            REQUIRE(y2.getImpl() == x.getImpl());
+            REQUIRE(y2.isContiguous());
+
+            // Tensor - 3-D => scalar
+            REQUIRE_NOTHROW(y3 = x.extract({dim0 - 2, dim1 - 2, dim2 - 2}));
+            int* y3_res = static_cast<int*>(y3.getImpl()->hostPtr());
+            REQUIRE(y3_res[0] == array0[(((dim0 - 2) * dim1) + (dim1 - 2))*dim2 + dim2 - 2]);
+            REQUIRE(y3.getImpl() == x.getImpl());
+            REQUIRE(y3.isContiguous());
+
+            // out-of-bound coordinate: extract() must throw
+            REQUIRE_THROWS(y = x.extract({0, dim1, 0}));
+
+        /////////////////
+        // extract Tensor slice given start coordinates and dimension
+            REQUIRE_NOTHROW(y = x.extract({0, 0, 1}, {dim0-1, 1, dim2-1}));
+            REQUIRE(y.getImpl() == x.getImpl()); // shared implem
+            REQUIRE(!y.isContiguous());
+
+            Tensor yClone = y.clone(); // cloning a non-contiguous Tensor copies its data contiguously
+            REQUIRE(yClone.isContiguous());
+            REQUIRE(approxEq<int>(yClone, y, 0.0f, 0.0f));
+        }
+    }
 
+    // print()
+    SECTION("Pretty printing for debug") {
+        Tensor x{};
+        // Empty Tensor
+        REQUIRE_THROWS(x.print());
+        // scalar
+        x = Tensor(42);
+        REQUIRE_NOTHROW(x.print());
+        // 1-D Tensors
+        x = Array1D<int, 1>{{1}};
+        REQUIRE_NOTHROW(x.print());
+        x = Array1D<int, 6>{{1,2,3,4,5,6}};
+        REQUIRE_NOTHROW(x.print());
+        // 2-D Tensors
+        x = Array2D<int, 3, 2>{{{1, 2}, {3, 4}, {5, 6}}};
+        REQUIRE_NOTHROW(x.print());
+        // +2-D Tensors
+        x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+        REQUIRE_NOTHROW(x.print());
+        x = Array4D<int, 2, 2, 2, 2>{{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},{{{11, 12}, {13, 14}}, {{15, 16}, {17, 18}}}}};
+        REQUIRE_NOTHROW(x.print());
     }
 }
 
-} // namespace Aidge
\ No newline at end of file
+} // namespace Aidge
-- 
GitLab