From 579e98c764cd8ae87f3db91d84adcffd0e999ec0 Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Mon, 19 Feb 2024 09:52:04 +0000
Subject: [PATCH] [Add] copy new lines that were added in backend_cpu

---
 include/aidge/backend/cpu/data/TensorImpl.hpp | 12 +++++++--
 unit_tests/data/Test_TensorImpl.cpp           | 27 +++++++++++++++++++
 2 files changed, 37 insertions(+), 2 deletions(-)

diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index a1fbfa336..78efc4a29 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -183,10 +183,18 @@ static Registrar<Tensor> registrarTensorImpl_cpu_Float32(
         {"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Float16(
         {"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
-        {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Int64(
         {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<long>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
+        {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int16(
+        {"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
+        {"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int8(
+        {"cpu", DataType::Int8}, Aidge::TensorImpl_cpu<int8_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt8(
+        {"cpu", DataType::UInt8}, Aidge::TensorImpl_cpu<uint8_t>::create);
 }  // namespace
 }  // namespace Aidge
 
diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp
index bd30bce83..e734fcd77 100644
--- a/unit_tests/data/Test_TensorImpl.cpp
+++ b/unit_tests/data/Test_TensorImpl.cpp
@@ -59,6 +59,33 @@ TEST_CASE("[core/data] Tensor creation") {
   }
 }
 
+TEST_CASE("Tensor fill") {
+  SECTION("Instantiate batches independently") {
+    // initialization with 0s
+    std::shared_ptr<Tensor> concatenatedTensor = std::make_shared<Tensor>(Array2D<int, 3, 5>{});
+    //concatenatedTensor->print();
+
+    std::shared_ptr<Tensor> myTensor1 = std::make_shared<Tensor>(Array1D<int, 5>{{1,2,3,4,5}});
+    std::shared_ptr<Tensor> myTensor2 = std::make_shared<Tensor>(Array1D<int, 5>{{6,7,8,9,10}});
+    std::shared_ptr<Tensor> myTensor3 = std::make_shared<Tensor>(Array1D<int, 5>{{11,12,13,14,15}});
+
+    // use copy function from implementation
+    concatenatedTensor->getImpl()->copy(myTensor1->getImpl()->rawPtr(), 5, 0);
+    concatenatedTensor->getImpl()->copy(myTensor2->getImpl()->rawPtr(), 5, 5);
+    concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 10);
+    // concatenatedTensor->print();
+
+    std::shared_ptr<Tensor> expectedTensor = std::make_shared<Tensor>(Array2D<int, 3, 5>{
+      {{1,2,3,4,5},
+      {6,7,8,9,10},
+      {11,12,13,14,15}}
+    });
+    // expectedTensor->print();
+
+    REQUIRE(*concatenatedTensor == *expectedTensor);
+  }
+}
+
 TEST_CASE("[core/data] Tensor methods","[Tensor]") {
   Tensor x = Array3D<int, 2, 2, 2>{{
     {{1, 2},
-- 
GitLab