diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index a1fbfa336ae2148f687d5181a77e0dafa7466cf2..78efc4a29f5aef4395b556d23d99da7609ff762c 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -183,10 +183,18 @@ static Registrar<Tensor> registrarTensorImpl_cpu_Float32(
     {"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Float16(
     {"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
-    {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Int64(
     {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<long>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
+    {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int16(
+    {"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
+    {"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int8(
+    {"cpu", DataType::Int8}, Aidge::TensorImpl_cpu<int8_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt8(
+    {"cpu", DataType::UInt8}, Aidge::TensorImpl_cpu<uint8_t>::create);
 
 } // namespace
 } // namespace Aidge
diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp
index bd30bce830d2a04f3c867f6997cfc462d040b44e..e734fcd7770483dbcd9f594847ffd4297c071e68 100644
--- a/unit_tests/data/Test_TensorImpl.cpp
+++ b/unit_tests/data/Test_TensorImpl.cpp
@@ -59,6 +59,33 @@ TEST_CASE("[core/data] Tensor creation") {
     }
 }
 
+TEST_CASE("Tensor fill") {
+    SECTION("Instantiate batches independently") {
+        // initialize the destination tensor with zeros
+        std::shared_ptr<Tensor> concatenatedTensor = std::make_shared<Tensor>(Array2D<int, 3, 5>{});
+        // concatenatedTensor->print();
+
+        std::shared_ptr<Tensor> myTensor1 = std::make_shared<Tensor>(Array1D<int, 5>{{1,2,3,4,5}});
+        std::shared_ptr<Tensor> myTensor2 = std::make_shared<Tensor>(Array1D<int, 5>{{6,7,8,9,10}});
+        std::shared_ptr<Tensor> myTensor3 = std::make_shared<Tensor>(Array1D<int, 5>{{11,12,13,14,15}});
+
+        // use the implementation's copy(src, nbElements, dstOffset) to fill each row
+        concatenatedTensor->getImpl()->copy(myTensor1->getImpl()->rawPtr(), 5, 0);
+        concatenatedTensor->getImpl()->copy(myTensor2->getImpl()->rawPtr(), 5, 5);
+        concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 10);
+        // concatenatedTensor->print();
+
+        std::shared_ptr<Tensor> expectedTensor = std::make_shared<Tensor>(Array2D<int, 3, 5>{
+            {{1,2,3,4,5},
+             {6,7,8,9,10},
+             {11,12,13,14,15}}
+        });
+        // expectedTensor->print();
+
+        REQUIRE(*concatenatedTensor == *expectedTensor);
+    }
+}
+
 TEST_CASE("[core/data] Tensor methods","[Tensor]") {
     Tensor x = Array3D<int, 2, 2, 2>{{
         {{1, 2},
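
For context, a minimal sketch (not part of the patch) of how the newly registered integer implementations could be exercised from a Catch2 test. It assumes the Aidge Tensor API used elsewhere in this test file (setDataType, setBackend, resize, getImpl); the test name and dimensions are illustrative:

TEST_CASE("Tensor implementation lookup for small integer types") {
    // Pick one of the newly registered DataTypes; Int16, UInt16 and UInt8
    // work the same way. Hypothetical test, not part of this patch.
    Tensor t;
    t.setDataType(DataType::Int8);
    t.setBackend("cpu"); // resolved through registrarTensorImpl_cpu_Int8 added above
    t.resize({4});
    REQUIRE(t.getImpl() != nullptr);
}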