/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#include <catch2/catch_test_macros.hpp>
#include <cstddef>   // std::size_t
#include <cstdint>   // std::uint16_t
#include <chrono>
#include <iostream>
#include <memory>
#include <numeric>   // std::accumulate
#include <random>    // std::random_device, std::mt19937, std::uniform_real_distribution

#include "aidge/data/Tensor.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/backend/cpu/operator/AddImpl.hpp"

namespace Aidge {

TEST_CASE("Test addition of Tensors","[TensorImpl][Add]") {
    constexpr std::uint16_t NBTRIALS = 10;
    // Create a random number generator
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // random floats in [0.1, 1.1)
    std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
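    // Coin-flip distribution used below to decide whether a dimension is replaced by 1 (broadcasting)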
    std::uniform_int_distribution<int> boolDist(0,1);

    // Create Add Operator
    std::shared_ptr<Node> mySub = Add(2);
    auto op = std::static_pointer_cast<OperatorTensor>(mySub->getOperator());
    op->setDataType(DataType::Float32);
    op->setBackend("cpu");

    // Create 2 input Tensors
    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
    op->associateInput(0,T0);
    T0->setDataType(DataType::Float32);
    T0->setBackend("cpu");
    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
    op->associateInput(1,T1);
    T1->setDataType(DataType::Float32);
    T1->setBackend("cpu");

    // Create results Tensor
    Tensor Tres{};
    Tres.setDataType(DataType::Float32);
    Tres.setBackend("cpu");

    // To measure execution time of 'Add_Op::forward()' member function call
    std::chrono::time_point<std::chrono::system_clock> start;
    std::chrono::time_point<std::chrono::system_clock> end;
    std::chrono::duration<double, std::micro> duration{};

    std::size_t number_of_operation = 0; // running count of element-wise operations across all trials

    for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
        // generate 2 random Tensors
        // handle dimensions, replace some dimensions with '1' to get broadcasting
        constexpr std::size_t nbDims = 4;
        std::vector<std::size_t> dims;
        for (std::size_t i = 0; i < nbDims; ++i) {