/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
 ********************************************************************************/

#include <chrono>
#include <cstddef> // std::size_t
#include <cstdint> // std::uint16_t
#include <memory>
#include <numeric> // std::accumulate
#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution,
                   // std::uniform_int_distribution

#include <catch2/catch_test_macros.hpp>

#include "aidge/backend/cpu/data/TensorImpl.hpp"
#include "aidge/backend/cpu/operator/MulImpl.hpp"
#include "aidge/data/DataType.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Mul.hpp"
#include "aidge/utils/ArrayHelpers.hpp"
#include "aidge/utils/Log.hpp"
#include "aidge/utils/TensorUtils.hpp"
namespace Aidge {
TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
std::shared_ptr<Mul_Op> op = std::make_shared<Mul_Op>();
op->setDataType(DataType::Float32);
op->setBackend("cpu");
    // NOTE: The first four cases use fixed values; the fifth uses random values with fixed dimensions.
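    // Reminder of the expected gradients for an elementwise multiply
    // y = x0 * x1 (with broadcasting):
    //   dL/dx0 = dL/dy * x1, summed over the axes where x0 was broadcast;
    //   dL/dx1 = dL/dy * x0, summed over the axes where x1 was broadcast.
    // Every expected tensor below can be re-derived by hand from this rule.
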
SECTION("Case 1: 1D and 2D Tensors") {
const auto T0 = std::make_shared<Tensor>(
Array2D<cpptype_t<DataType::Float32>, 2, 3>({{{1, 2, 3}, {4, 5, 6}}}));
const auto T1 =
std::make_shared<Tensor>(Array1D<cpptype_t<DataType::Float32>, 3>({0.1, 0.2, 0.3}));
op->associateInput(0, T0);
op->associateInput(1, T1);
op->getOutput(0)->setGrad(std::make_shared<Tensor>(
Array2D<float, 2, 3>({{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}})));
op->forwardDims();
op->backward();
const Tensor expectedGrad0 =
Array2D<cpptype_t<DataType::Float32>, 2, 3>({{{0.1, 0.2, 0.3}, {0.1, 0.2, 0.3}}});
const Tensor expectedGrad1 = Array1D<cpptype_t<DataType::Float32>, 3>({5, 7, 9});
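        // Hand derivation from the values above: grad0 is the output gradient
        // (all ones) times T1 broadcast over the rows; grad1[j] sums column j
        // of (outputGrad * T0): (1 + 4, 2 + 5, 3 + 6) = (5, 7, 9).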
REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(1)->grad()), expectedGrad1));
    }

    SECTION("Case 2: 3D and 1D tensors") {
const auto T0 = std::make_shared<Tensor>(Array3D<float, 2, 2, 3>(
{{{{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}},
{{7.0, 8.0, 9.0}, {10.0, 11.0, 12.0}}}}));
const auto T1 =
std::make_shared<Tensor>(Array1D<float, 3>({0.3, 0.2, 0.1}));
const auto newGrad = std::make_shared<Tensor>(Array3D<float, 2, 2, 3>(
{{{{1, 1, 1}, {1, 1, 1}}, {{1, 1, 1}, {1, 1, 1}}}}));
const Tensor expectedGrad0 =
Array3D<float, 2, 2, 3>({{{{0.3, 0.2, 0.1}, {0.3, 0.2, 0.1}},
{{0.3, 0.2, 0.1}, {0.3, 0.2, 0.1}}}});
const Tensor expectedGrad1 = Array1D<cpptype_t<DataType::Float32>, 3>({22.0, 26.0, 30.0});
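        // grad1[j] reduces (outputGrad * T0) over both leading axes, e.g.
        // grad1[0] = 1 + 4 + 7 + 10 = 22 and grad1[2] = 3 + 6 + 9 + 12 = 30.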
op->associateInput(0, T0);
op->associateInput(1, T1);
op->getOutput(0)->setGrad(newGrad);
op->forwardDims();
op->backward();
REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(1)->grad()), expectedGrad1));
    }

    SECTION("Case 3: 4D and 2D tensors") {
const auto T0 = std::make_shared<Tensor>(Array4D<cpptype_t<DataType::Float32>, 2, 2, 3, 3>(
{{{{{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}, {7.0, 8.0, 9.0}},
{{10.0, 11.0, 12.0}, {13.0, 14.0, 15.0}, {16.0, 17.0, 18.0}}},
{{{19.0, 20.0, 21.0}, {22.0, 23.0, 24.0}, {25.0, 26.0, 27.0}},
{{28.0, 29.0, 30.0},
{31.0, 32.0, 33.0},
{34.0, 35.0, 36.0}}}}}));
const auto T1 = std::make_shared<Tensor>(Array2D<cpptype_t<DataType::Float32>, 3, 3>(
{{{0.5, 0.3, 0.1}, {0.4, 0.2, 0.6}, {0.7, 0.8, 0.9}}}));
const auto newGrad =
std::make_shared<Tensor>(Array4D<cpptype_t<DataType::Float32>, 2, 2, 3, 3>(
{{{{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}},
{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}},
{{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}},
{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}}}}));
const Tensor expectedGrad0 =
Array4D<cpptype_t<DataType::Float32>, 2, 2, 3, 3>(
{{{{{0.5, 0.3, 0.1}, {0.4, 0.2, 0.6}, {0.7, 0.8, 0.9}},
{{0.5, 0.3, 0.1}, {0.4, 0.2, 0.6}, {0.7, 0.8, 0.9}}},
{{{0.5, 0.3, 0.1}, {0.4, 0.2, 0.6}, {0.7, 0.8, 0.9}},
{{0.5, 0.3, 0.1}, {0.4, 0.2, 0.6}, {0.7, 0.8, 0.9}}}}});
const Tensor expectedGrad1 =
Array2D<cpptype_t<DataType::Float32>, 3, 3>({{{58.0, 62.0, 66.0},
{70.0, 74.0, 78.0},
{82.0, 86.0, 90.0}}});
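        // grad1[i][j] reduces T0 over the two leading axes; the four values
        // feeding each entry are 9 apart, e.g. grad1[0][0] = 1 + 10 + 19 + 28 = 58.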
op->associateInput(0, T0);
op->associateInput(1, T1);
op->getOutput(0)->setGrad(newGrad);
op->forwardDims();
op->backward();
REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(1)->grad()), expectedGrad1));
    }

    SECTION("Case 4: 3D and 2D tensors") {
const auto T0 = std::make_shared<Tensor>(
Array3D<float, 2, 3, 4>({{{
{1.0, 2.0, 3.0, 4.0},
{5.0, 6.0, 7.0, 8.0},
{9.0, 10.0, 11.0, 12.0},
},
{
{13.0, 14.0, 15.0, 16.0},
{17.0, 18.0, 19.0, 20.0},
{21.0, 22.0, 23.0, 24.0},
}}}));
const auto T1 = std::make_shared<Tensor>(
Array2D<cpptype_t<DataType::Float32>, 3, 4>({{{0.1, 0.2, 0.3, 0.4},
{0.5, 0.6, 0.7, 0.8},
{0.9, 1.0, 1.1, 1.2}}}));
const auto newGrad = std::make_shared<Tensor>(
Array3D<cpptype_t<DataType::Float32>, 2, 3, 4>({{{
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
},
{
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
{1.0, 1.0, 1.0, 1.0},
}}}));
const Tensor expectedGrad0 =
Array3D<cpptype_t<DataType::Float32>, 2, 3, 4>({{{{0.1, 0.2, 0.3, 0.4},
{0.5, 0.6, 0.7, 0.8},
{0.9, 1.0, 1.1, 1.2}},
{{0.1, 0.2, 0.3, 0.4},
{0.5, 0.6, 0.7, 0.8},
{0.9, 1.0, 1.1, 1.2}}}});
const Tensor expectedGrad1 =
Array2D<cpptype_t<DataType::Float32>, 3, 4>({{{14.0, 16.0, 18.0, 20.0},
{22.0, 24.0, 26.0, 28.0},
{30.0, 32.0, 34.0, 36.0}}});
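        // grad1 reduces T0 over its first axis only, e.g.
        // grad1[0][0] = 1 + 13 = 14 and grad1[2][3] = 12 + 24 = 36.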
op->associateInput(0, T0);
op->associateInput(1, T1);
op->getOutput(0)->setGrad(newGrad);
op->forwardDims();
op->backward();
REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(1)->grad()), expectedGrad1));
    }

    SECTION("Case 5: Tensors with random values") {
// Use random values
const std::vector<std::size_t> dims0 = {5, 2, 1, 7}; // First tensor
const std::vector<std::size_t> dims1 = {2, 6, 7}; // Second tensor
const std::vector<std::size_t> outputDims = {5, 2, 6, 7};
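        // Right-aligned broadcasting: {5, 2, 1, 7} x {2, 6, 7} behaves like
        // {5, 2, 1, 7} x {1, 2, 6, 7}, hence the {5, 2, 6, 7} output.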
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<float> dist(0.1f, 1.0f);
auto T0 = std::make_shared<Tensor>(dims0);
T0->setDataType(DataType::Float32);
T0->setBackend("cpu");
float* input0Data = static_cast<float*>(T0->getImpl()->rawPtr());
// Fill with random values
for (std::size_t i = 0; i < T0->size(); ++i) {
input0Data[i] = dist(gen);
}
auto T1 = std::make_shared<Tensor>(dims1);
T1->setDataType(DataType::Float32);
T1->setBackend("cpu");
float* input1Data = static_cast<float*>(T1->getImpl()->rawPtr());
// Fill with random values
for (std::size_t i = 0; i < T1->size(); ++i) {
input1Data[i] = dist(gen);
}
op->associateInput(0, T0);
op->associateInput(1, T1);
op->forwardDims();
op->forward();
Tensor expectedOutput{outputDims};
expectedOutput.setBackend("cpu");
float* expectedOutputData = static_cast<float*>(expectedOutput.getImpl()->rawPtr());
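        // Row-major linear index for a shape {N, C, H, W}:
        //   idx = w + W * (h + H * (c + C * n));
        // an axis that is broadcast (size 1, or absent) contributes index 0.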
for (std::size_t n = 0; n < 5; ++n) {
for (std::size_t c = 0; c < 2; ++c) {
for (std::size_t h = 0; h < 6; ++h) {
for (std::size_t w = 0; w < 7; ++w) {
std::size_t outIdx = w + 7 * (h + 6 * (c + 2 * n));
std::size_t in0Idx =
w + 7 * (0 + 1 * (c + 2 * n)); // middle dim is 1
std::size_t in1Idx =
w + 7 * (h + 6 * c); // no n dimension
expectedOutputData[outIdx] = input0Data[in0Idx] * input1Data[in1Idx];
}
}
}
}
auto outputTensor = op->getOutput(0);
REQUIRE(approxEq<float>(*outputTensor, expectedOutput));
// Backward pass
std::vector<float> gradOutputData(expectedOutput.size());
for (auto &val : gradOutputData) {
val = dist(gen);
}
op->getOutput(0)->setGrad(std::make_shared<Tensor>());
op->getOutput(0)->grad()->resize(outputDims);
op->getOutput(0)->grad()->getImpl()->setRawPtr(gradOutputData.data(),
expectedOutput.size());
// Compute reference gradients
std::vector<float> expectedGrad0(T0->size(), 0.0f);
std::vector<float> expectedGrad1(T1->size(), 0.0f);
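        // The '+=' accumulation below is the sum-reduction over broadcast
        // axes: every output position that read a given input element sends
        // its gradOutput * otherInput contribution back to that element.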
for (std::size_t n = 0; n < 5; ++n) {
for (std::size_t c = 0; c < 2; ++c) {
for (std::size_t h = 0; h < 6; ++h) {
for (std::size_t w = 0; w < 7; ++w) {
std::size_t outIdx = w + 7 * (h + 6 * (c + 2 * n));
std::size_t in0Idx = w + 7 * (0 + 1 * (c + 2 * n));
std::size_t in1Idx = w + 7 * (h + 6 * c);
// Gradient for input0: grad_output * input1
expectedGrad0[in0Idx] +=
gradOutputData[outIdx] * input1Data[in1Idx];
// Gradient for input1: grad_output * input0
expectedGrad1[in1Idx] +=
gradOutputData[outIdx] * input0Data[in0Idx];
}
}
}
}
// Perform backward pass
op->backward();
auto expectedGrad0Tensor = std::make_shared<Tensor>();
expectedGrad0Tensor->resize(T0->dims());
expectedGrad0Tensor->setBackend("cpu");
expectedGrad0Tensor->setDataType(DataType::Float32);
expectedGrad0Tensor->getImpl()->setRawPtr(expectedGrad0.data(),
expectedGrad0.size());
auto expectedGrad1Tensor = std::make_shared<Tensor>(T1->dims());
expectedGrad1Tensor->setBackend("cpu");
expectedGrad1Tensor->setDataType(DataType::Float32);
expectedGrad1Tensor->getImpl()->setRawPtr(expectedGrad1.data(),
expectedGrad1.size());
// Verify backward pass
REQUIRE(approxEq<float>(*T0->grad(), *expectedGrad0Tensor));
REQUIRE(approxEq<float>(*T1->grad(), *expectedGrad1Tensor));
    }
}

TEST_CASE("[cpu/operator] Mul(forward)", "[Mul][CPU]") {
constexpr std::uint16_t NBTRIALS = 10;
// Create a random number generator
std::random_device rd;
std::mt19937 gen(rd());
    std::uniform_real_distribution<float> valueDist(
        0.1f,
        1.1f); // random float distribution between 0.1 and 1.1
std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2),
std::size_t(10));
std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1),
std::size_t(3));
std::uniform_int_distribution<int> boolDist(0, 1);
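    // boolDist is used by the broadcasting sections to decide, per axis,
    // whether a dimension collapses to 1, so that random broadcast patterns
    // are exercised.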
std::shared_ptr<Mul_Op> op = std::make_shared<Mul_Op>();
op->setDataType(DataType::Float32);
op->setBackend("cpu");
std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
op->associateInput(0, T0);
T0->setDataType(DataType::Float32);
T0->setBackend("cpu");
std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
op->associateInput(1, T1);
T1->setDataType(DataType::Float32);
T1->setBackend("cpu");
std::shared_ptr<Tensor> Tres = std::make_shared<Tensor>();
Tres->setDataType(DataType::Float32);
Tres->setBackend("cpu");
    // To measure execution time of 'Mul_Op::forward()' member function call
std::chrono::time_point<std::chrono::system_clock> start;
std::chrono::time_point<std::chrono::system_clock> end;
std::chrono::duration<double, std::micro> duration{};
SECTION("MulImpl_cpu::forward()") {
SECTION("Scalar / Scalar") {}
SECTION("Scalar / +1-D Tensor") {}
SECTION("+1-D Tensor / +1-D Tensor - same dimensions") {
std::size_t number_of_operation = 0;
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// generate 2 random Tensors
const auto nbDims = nbDimsDist(gen);
auto dims = std::vector<std::size_t>{};
for (std::size_t i = 0; i < nbDims; ++i) {
dims.push_back(dimSizeDist(gen));
}
const auto nb_elements =
std::accumulate(dims.cbegin(),
dims.cend(),
std::size_t(1),
std::multiplies<std::size_t>());
number_of_operation += nb_elements;
// without broadcasting
float *array0 = new float[nb_elements];
float *array1 = new float[nb_elements];
float *result = new float[nb_elements];
for (std::size_t i = 0; i < nb_elements; ++i) {
array0[i] = valueDist(gen);
array1[i] = valueDist(gen);
result[i] = array0[i] * array1[i];
}
// input0
T0->resize(dims);
T0->getImpl()->setRawPtr(array0, nb_elements);
// input1
T1->resize(dims);
T1->getImpl()->setRawPtr(array1, nb_elements);
// results
Tres->resize(dims);
Tres->getImpl()->setRawPtr(result, nb_elements);
op->forwardDims();
start = std::chrono::system_clock::now();
op->forward();
end = std::chrono::system_clock::now();
duration +=
std::chrono::duration_cast<std::chrono::microseconds>(
end - start);
REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
delete[] array0;
delete[] array1;
delete[] result;
}
Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
Log::info("total time: {}μs\n", duration.count());
        }

        SECTION("+1-D Tensor / +1-D Tensor - broadcasting") {
std::size_t number_of_operation = 0;
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// generate 2 random Tensors
// handle dimensions, replace some dimensions with '1' to get
// broadcasting
constexpr std::size_t nbDims = 4;
std::vector<std::size_t> dimensions;
for (std::size_t i = 0; i < nbDims; ++i) {
dimensions.push_back(dimSizeDist(gen));
}
auto dims0 = dimensions;
auto dims1 = dimensions;
auto dimsOut = dimensions;
for (std::size_t i = 0; i < nbDims; ++i) {
if (boolDist(gen)) {
dims0[i] = 1;
}
if (boolDist(gen)) {
dims1[i] = 1;
}
dimsOut[i] = (dims0[i] == 1) ? dims1[i] : dims0[i];
}
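                // In the loop above, dims0 and dims1 both start from
                // 'dimensions', so each axis pair is either equal or has a 1;
                // taking the non-1 size reproduces the broadcast output shape.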
                for (auto dim : dims0) {
                    Log::info("Dimension of input 0: {}", dim);
                }
                for (auto dim : dims1) {
                    Log::info("Dimension of input 1: {}", dim);
                }
// create arrays and fill them with random values
float *array0 =
new float[dims0[0] * dims0[1] * dims0[2] * dims0[3]];
float *array1 =
new float[dims1[0] * dims1[1] * dims1[2] * dims1[3]];
float *result = new float[dimsOut[0] * dimsOut[1] *
dimsOut[2] * dimsOut[3]];
for (std::size_t i = 0;
i < dims0[0] * dims0[1] * dims0[2] * dims0[3];
++i) {
array0[i] = valueDist(gen);
}
for (std::size_t i = 0;
i < dims1[0] * dims1[1] * dims1[2] * dims1[3];
++i) {
array1[i] = valueDist(gen);
}
// compute true result
const std::size_t strides0[nbDims] = {
dims0[1] * dims0[2] * dims0[3],
dims0[2] * dims0[3],
dims0[3],
1};
const std::size_t strides1[nbDims] = {
dims1[1] * dims1[2] * dims1[3],
dims1[2] * dims1[3],
dims1[3],
1};
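                // With these strides, a size-1 axis is pinned to offset 0 by
                // the '(dims > 1) ? index : 0' selectors below, which is how
                // broadcasting reuses the same input element.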
for (std::size_t a = 0; a < dimsOut[0]; ++a) {
for (std::size_t b = 0; b < dimsOut[1]; ++b) {
const std::size_t idx0_0 =
strides0[0] * ((dims0[0] > 1) ? a : 0) +
strides0[1] * ((dims0[1] > 1) ? b : 0);
const std::size_t idx1_0 =
strides1[0] * ((dims1[0] > 1) ? a : 0) +
strides1[1] * ((dims1[1] > 1) ? b : 0);
for (std::size_t c = 0; c < dimsOut[2]; ++c) {
const std::size_t idx_out =
dimsOut[3] *
(c + dimsOut[2] * (b + dimsOut[1] * a));
for (std::size_t d = 0; d < dimsOut[3]; ++d) {
std::size_t idx0 =
idx0_0 +
strides0[2] * ((dims0[2] > 1) ? c : 0) +
((dims0[3] > 1) ? d : 0);
std::size_t idx1 =
idx1_0 +
strides1[2] * ((dims1[2] > 1) ? c : 0) +
((dims1[3] > 1) ? d : 0);
result[idx_out + d] =
array0[idx0] * array1[idx1];
// std::cout << "(" << idx0 << ", " << idx1 <<
// ") -> " << array0[idx0] << " * " <<
// array1[idx1] << " -> " << idx_out + d <<
// std::endl;
}
}
}
}
// conversion to Aidge::Tensors
// input0
T0->resize(dims0);
T0->getImpl()->setRawPtr(
array0,
dims0[0] * dims0[1] * dims0[2] * dims0[3]);
// input1
T1->resize(dims1);
T1->getImpl()->setRawPtr(
array1,
dims1[0] * dims1[1] * dims1[2] * dims1[3]);
// results
Tres->resize(dimsOut);
Tres->getImpl()->setRawPtr(
result,
dimsOut[0] * dimsOut[1] * dimsOut[2] * dimsOut[3]);
// compute result
op->forwardDims();
start = std::chrono::system_clock::now();
op->forward();
end = std::chrono::system_clock::now();
duration +=
std::chrono::duration_cast<std::chrono::microseconds>(
end - start);
// comparison between truth and computed result
REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
delete[] array0;
delete[] array1;
delete[] result;
const std::size_t nb_elements =
std::accumulate(dimsOut.cbegin(),
dimsOut.cend(),
std::size_t(1),
std::multiplies<std::size_t>());
number_of_operation += nb_elements;
}
Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
Log::info("total time: {}μs\n", duration.count());
        }

        SECTION("+1-D Tensor / 1-D Tensor") {
std::size_t number_of_operation = 0;
std::uniform_int_distribution<std::size_t> nbRemovedDimsDist(
std::size_t(1),
std::size_t(3));
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
// generate 2 random Tensors
// handle dimensions
constexpr std::size_t nbDims = 4;
std::vector<std::size_t> dims0(4);
for (std::size_t i = 0; i < nbDims; ++i) {
dims0[i] = dimSizeDist(gen);
}
std::vector<std::size_t> dimsOut = dims0;
std::vector<std::size_t> dims1 = dims0;
for (std::size_t i = 0; i < nbDims; ++i) {
if (boolDist(gen)) {
dims1[i] = 1;
}
}
dims1.erase(dims1.cbegin(),
dims1.cbegin() + nbRemovedDimsDist(gen));
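                // Dropping 1 to 3 leading axes above gives a genuinely
                // lower-rank second input, so rank alignment is tested as
                // well as size-1 broadcasting.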
// create arrays and fill them with random values
float *array0 =
new float[dims0[0] * dims0[1] * dims0[2] * dims0[3]];
std::size_t array1_size =
std::accumulate(dims1.cbegin(),
dims1.cend(),
std::size_t(1),
std::multiplies<std::size_t>());
float *array1 = new float[array1_size];
float *result = new float[dimsOut[0] * dimsOut[1] *
dimsOut[2] * dimsOut[3]];
for (std::size_t i = 0;
i < (dims0[0] * dims0[1] * dims0[2] * dims0[3]);
++i) {
array0[i] = valueDist(gen);
}
for (std::size_t i = 0; i < array1_size; ++i) {
array1[i] = valueDist(gen);
}
// compute true result
auto dims1_tmp = dims1;
dims1_tmp.insert(dims1_tmp.cbegin(),
4 - dims1_tmp.size(),
std::size_t(1));
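                // dims1_tmp left-pads dims1 with 1s back up to rank 4 so the
                // reference computation can reuse the same 4-level loop nest.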
const std::size_t strides0[nbDims] = {
dims0[1] * dims0[2] * dims0[3],
dims0[2] * dims0[3],
dims0[3],
1};
const std::size_t strides1[nbDims] = {
dims1_tmp[1] * dims1_tmp[2] * dims1_tmp[3],
dims1_tmp[2] * dims1_tmp[3],
dims1_tmp[3],
1};
for (std::size_t a = 0; a < dimsOut[0]; ++a) {
for (std::size_t b = 0; b < dimsOut[1]; ++b) {
const std::size_t idx0_0 =
strides0[0] * ((dims0[0] > 1) ? a : 0) +
strides0[1] * ((dims0[1] > 1) ? b : 0);
const std::size_t idx1_0 =
strides1[0] * ((dims1_tmp[0] > 1) ? a : 0) +
strides1[1] * ((dims1_tmp[1] > 1) ? b : 0);
for (std::size_t c = 0; c < dimsOut[2]; ++c) {
const std::size_t idx_out =
dimsOut[3] *
(c + dimsOut[2] * (b + dimsOut[1] * a));
for (std::size_t d = 0; d < dimsOut[3]; ++d) {
std::size_t idx0 =
idx0_0 +
strides0[2] * ((dims0[2] > 1) ? c : 0) +
((dims0[3] > 1) ? d : 0);
std::size_t idx1 =
idx1_0 +
strides1[2] *
((dims1_tmp[2] > 1) ? c : 0) +
((dims1_tmp[3] > 1) ? d : 0);
result[idx_out + d] =
array0[idx0] * array1[idx1];
// std::cout << "(" << idx0 << ", " << idx1 <<
// ") -> " << array0[idx0] << " * " <<
// array1[idx1] << " -> " << idx_out + d <<
// std::endl;
}
}
}
}
// conversion to Aidge::Tensors
// input0
T0->resize(dims0);
T0->getImpl()->setRawPtr(
array0,
dims0[0] * dims0[1] * dims0[2] * dims0[3]);
// input1
T1->resize(dims1);
T1->getImpl()->setRawPtr(array1, array1_size);
// results
Tres->resize(dimsOut);
Tres->getImpl()->setRawPtr(
result,
dimsOut[0] * dimsOut[1] * dimsOut[2] * dimsOut[3]);
// compute result
op->forwardDims();
start = std::chrono::system_clock::now();
op->forward();
end = std::chrono::system_clock::now();
duration +=
std::chrono::duration_cast<std::chrono::microseconds>(
end - start);
// comparison between truth and computed result
REQUIRE(approxEq<float>(*(op->getOutput(0)), *Tres));
delete[] array0;
delete[] array1;
delete[] result;
const std::size_t nb_elements =
std::accumulate(dimsOut.cbegin(),
dimsOut.cend(),
std::size_t(1),
std::multiplies<std::size_t>());
number_of_operation += nb_elements;
}
Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
Log::info("total time: {}μs\n", duration.count());
}
}
}
} // namespace Aidge