/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>

#include <cmath>
#include <cstdlib>
#include <memory>

#include "aidge/data/Tensor.hpp"
#include "aidge/data/TensorImpl.hpp"
#include "aidge/operator/AvgPooling.hpp"
TEST_CASE("[cpu/operator] AvgPooling(forward)") {
    // Shared input: NCHW tensor of shape (2,2,5,5) filled with sequential
    // values, so every pooled window's average is easy to verify by hand.
    std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<float,2,2,5,5> { //NCHW
        {
            {
                {{  0,   1,   2,   3,   4},
                 {  5,   6,   7,   8,   9},
                 { 10,  11,  12,  13,  14},
                 { 15,  16,  17,  18,  19},
                 { 20,  21,  22,  23,  24}},
                {{ 25,  26,  27,  28,  29},
                 { 30,  31,  32,  33,  34},
                 { 35,  36,  37,  38,  39},
                 { 40,  41,  42,  43,  44},
                 { 45,  46,  47,  48,  49}}
            },
            {
                {{100, 101, 102, 103, 104},
                 {105, 106, 107, 108, 109},
                 {110, 111, 112, 113, 114},
                 {115, 116, 117, 118, 119},
                 {120, 121, 122, 123, 124}},
                {{125, 126, 127, 128, 129},
                 {130, 131, 132, 133, 134},
                 {135, 136, 137, 138, 139},
                 {140, 141, 142, 143, 144},
                 {145, 146, 147, 148, 149}}
            }
        }
    });

    SECTION("Stride") {
        // 2x2 kernel with stride 2: non-overlapping windows, so each 5x5
        // feature map yields a 2x2 output (the last row/column is dropped).
        std::shared_ptr<Node> myAvgPool = AvgPooling({2,2}, "mycdw", {2,2});
        myAvgPool->getOperator()->setDatatype(DataType::Float32);
        myAvgPool->getOperator()->setBackend("cpu");
        // Expected values are the mean of each 2x2 window,
        // e.g. (0 + 1 + 5 + 6) / 4 = 3 for the top-left window.
        std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<float,2,2,2,2> {
            {
                {
                    {{  3,   5},
                     { 13,  15}},
                    {{ 28,  30},
                     { 38,  40}}
                },
                {
                    {{103, 105},
                     {113, 115}},
                    {{128, 130},
                     {138, 140}}
                }
            }
        });
        myAvgPool->getOperator()->associateInput(0, myInput);
        myAvgPool->getOperator()->computeOutputDims();
        myAvgPool->forward();
        myAvgPool->getOperator()->getOutput(0)->print();
        REQUIRE(*(myAvgPool->getOperator()->getOutput(0)) == *myOutput);
    }

    SECTION("Stride >= feature dim") {
        // Kernel covering the whole 3x3 feature map: a single window, so the
        // output is one value — the mean of all nine inputs.
        std::shared_ptr<Tensor> myInput2 = std::make_shared<Tensor>(Array4D<float,1,1,3,3> { //NCHW
            {
                {
                    {{0.3745, 0.9507, 0.7320},
                     {0.5987, 0.1560, 0.1560},
                     {0.0581, 0.8662, 0.6011}}
                }
            }
        });
        std::shared_ptr<Node> myAvgPool = AvgPooling({3,3}, "mycdw", {3,3});
        myAvgPool->getOperator()->setDatatype(DataType::Float32);
        myAvgPool->getOperator()->setBackend("cpu");
        Tensor myOutput = Array4D<float,1,1,1,1> {
            {{{{(0.3745 + 0.9507 + 0.7320 + 0.5987 + 0.1560 + 0.1560 + 0.0581 + 0.8662 + 0.6011)/9.0}}}}
        };
        myAvgPool->getOperator()->associateInput(0, myInput2);
        myAvgPool->getOperator()->computeOutputDims();
        myAvgPool->forward();
        myAvgPool->getOperator()->getOutput(0)->print();
        // Compare element-wise with an absolute tolerance: the reference mean
        // is computed in double precision and stored as float.
        // NOTE: std::fabs (from <cmath>) is used instead of std::abs, which
        // with only <cstdlib> included resolves to the int overload and would
        // truncate the float difference to 0.
        float* outPtr = static_cast<float*>(myAvgPool->getOperator()->output(0).getImpl()->rawPtr());
        float* expectedOutPtr = static_cast<float*>(myOutput.getImpl()->rawPtr());
        for (std::size_t i = 0; i < 1; ++i) {
            REQUIRE(std::fabs(outPtr[i] - expectedOutPtr[i]) < 0.00001);
        }
    }
}