/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#include <array>
#include <cmath>
#include <cstddef>

#include <catch2/catch_test_macros.hpp>
#include <cuda_runtime.h>

#include "Test_cuda.hpp"

#include "aidge/data/Tensor.hpp"

#include "aidge/backend/cpu.hpp"
#include "aidge/backend/cuda.hpp"

using namespace Aidge;

TEST_CASE("[gpu/operator] FC(forward)", "[FC][GPU]") {
    std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array2D<float, 5, 75>{
            {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
             {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
             {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
             {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
             {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}});
    std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<float, 5>{{1, 2, 3, 4, 5}});
    std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<float, 2, 5>{
            {{23601, 23602, 23603, 23604, 23605},
             {68601, 68602, 68603, 68604, 68605}}});

    myWeights->setBackend("cuda");
    myBias->setBackend("cuda");

    std::shared_ptr<Node> myFC = FC(75, 5, false, "myfc");
    auto op = std::static_pointer_cast<OperatorTensor>(myFC->getOperator());
    op->associateInput(1, myWeights);
    op->associateInput(2, myBias);

    SECTION("2D input") {
        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<float, 2, 75>{
                {{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                  15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
                  30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
                  45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
                  60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74},
                 {75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
                  90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
                  105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
                  120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
                  135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149}}});
        myInput->setBackend("cuda");
        op->associateInput(0, myInput);
        op->setDataType(DataType::Float32);
        op->setBackend("cuda");
        op->computeOutputDims();
        myFC->forward();

        float* computedOutput = new float[myOutput->size()]();
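        // Expected values, derived by hand: each weight row is the pattern 1..15
        // repeated five times, so for input row 0 (values 0..74) the dot product is
        //   5 * sum_{i=0}^{14} i*(i+1)  +  15*120 * (0+1+2+3+4)  =  5*1120 + 18000  =  23600,
        // and adding the bias 1..5 gives 23601..23605. Input row 1 adds 75 to every
        // element, contributing 75 * 600 = 45000 more, hence 68601..68605.
        // Copy the device result back to the host before comparing.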
        cudaMemcpy(computedOutput, op->getOutput(0)->getImpl()->rawPtr(),
                   sizeof(float) * myOutput->size(), cudaMemcpyDeviceToHost);

        for (std::size_t i = 0; i < myOutput->size(); i++) {
            const float targetOutput = *(static_cast<float*>(myOutput->getImpl()->rawPtr()) + i);
            REQUIRE(std::fabs(computedOutput[i] - targetOutput) < 1e-6);
        }

        delete[] computedOutput;
    }

    SECTION("4D input") {
        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<float, 2, 3, 5, 5>{
                {{{{0, 1, 2, 3, 4},
                   {5, 6, 7, 8, 9},
                   {10, 11, 12, 13, 14},
                   {15, 16, 17, 18, 19},
                   {20, 21, 22, 23, 24}},
                  {{25, 26, 27, 28, 29},
                   {30, 31, 32, 33, 34},
                   {35, 36, 37, 38, 39},
                   {40, 41, 42, 43, 44},
                   {45, 46, 47, 48, 49}},
                  {{50, 51, 52, 53, 54},
                   {55, 56, 57, 58, 59},
                   {60, 61, 62, 63, 64},
                   {65, 66, 67, 68, 69},
                   {70, 71, 72, 73, 74}}},
                 {{{75, 76, 77, 78, 79},
                   {80, 81, 82, 83, 84},
                   {85, 86, 87, 88, 89},
                   {90, 91, 92, 93, 94},
                   {95, 96, 97, 98, 99}},
                  {{100, 101, 102, 103, 104},
                   {105, 106, 107, 108, 109},
                   {110, 111, 112, 113, 114},
                   {115, 116, 117, 118, 119},
                   {120, 121, 122, 123, 124}},
                  {{125, 126, 127, 128, 129},
                   {130, 131, 132, 133, 134},
                   {135, 136, 137, 138, 139},
                   {140, 141, 142, 143, 144},
                   {145, 146, 147, 148, 149}}}}});
        myInput->setBackend("cuda");
        op->associateInput(0, myInput);
        op->setDataType(DataType::Float32);
        op->setBackend("cuda");
        op->computeOutputDims();
        myFC->forward();

        float* computedOutput = new float[myOutput->size()]();
        cudaMemcpy(computedOutput, op->getOutput(0)->getImpl()->rawPtr(),
                   sizeof(float) * myOutput->size(), cudaMemcpyDeviceToHost);

        for (std::size_t i = 0; i < myOutput->size(); i++) {
            const float targetOutput = *(static_cast<float*>(myOutput->getImpl()->rawPtr()) + i);
            REQUIRE(std::fabs(computedOutput[i] - targetOutput) < 1e-6);
        }

        delete[] computedOutput;
    }
}