Skip to content
Snippets Groups Projects
Commit 570d537c authored by Vincent Templier's avatar Vincent Templier
Browse files

Add tests for CPU library

parent ddc91ac9
No related branches found
No related tags found
No related merge requests found
...@@ -49,4 +49,8 @@ if (NOT BUILD_CPU_ALONE) ...@@ -49,4 +49,8 @@ if (NOT BUILD_CPU_ALONE)
set_target_properties(cpu PROPERTIES COTIRE_ADD_UNITY_BUILD FALSE) set_target_properties(cpu PROPERTIES COTIRE_ADD_UNITY_BUILD FALSE)
# set_target_properties(n2d2_cpu_lib PROPERTIES COTIRE_CXX_PREFIX_HEADER_INIT "include/utils/Precompiled.hpp") # set_target_properties(n2d2_cpu_lib PROPERTIES COTIRE_CXX_PREFIX_HEADER_INIT "include/utils/Precompiled.hpp")
cotire(cpu) cotire(cpu)
endif() endif()
\ No newline at end of file
# Build the unit-test tree only when the TESTS option is enabled.
if (TESTS)
add_subdirectory(tests)
endif()
# CTest integration for this test directory: discovered Catch2 cases can be
# run through `ctest` from the build tree.
enable_testing()

# Fetch the Catch2 test framework at configure time.
# (fixed: lowercase `include` for consistency with the other include() calls;
# CMake commands are case-insensitive but the codebase uses lowercase.)
include(FetchContent)
FetchContent_Declare(
    Catch2
    GIT_REPOSITORY https://github.com/catchorg/Catch2.git
    GIT_TAG v3.0.1 # or a later release
)
FetchContent_MakeAvailable(Catch2)

# Gather every test source under this directory.
# NOTE(review): file(GLOB) does not re-run when new .cpp files are added;
# CMake must be re-invoked manually (explicit source lists are recommended).
file(GLOB_RECURSE src_files "*.cpp")
add_executable(tests_cpu ${src_files})

# Link the library under test plus the Catch2-provided main().
target_link_libraries(tests_cpu PUBLIC cpu)
target_link_libraries(tests_cpu PRIVATE Catch2::Catch2WithMain)

# Make Catch2's CMake helpers (catch_discover_tests) visible, then register
# each TEST_CASE as an individual CTest test.
list(APPEND CMAKE_MODULE_PATH ${catch2_SOURCE_DIR}/extras)
include(CTest)
include(Catch)
catch_discover_tests(tests_cpu)
#include <catch2/catch_test_macros.hpp>
#include <memory>
#include <string>
#include "data/Tensor.hpp"
#include "graph/GraphView.hpp"
#include "graph/OpArgs.hpp"
#include "scheduler/Scheduler.hpp"
#include "aidge_cpu.hpp"
using namespace Aidge;
TEST_CASE("Scheduler cpu relu") {
    // Minimal single-operator graph: run ReLU on a small 1-D tensor through
    // the sequential scheduler and check the clamped result.
    std::shared_ptr<Tensor> inputTensor = std::make_shared<Tensor>(Array1D<int, 3>{-1, 2, -3});

    std::shared_ptr<GraphView> g = Sequential({ReLU(0.0, "relu1")});
    g->setDatatype(Aidge::DataType::Int32);
    g->setBackend("cpu");
    (*g)["relu1"]->getOperator()->setInput(0, inputTensor);

    // Fixed: the original test stopped after setInput() and asserted nothing,
    // so it could never fail. Run the scheduler and validate the output,
    // mirroring the "[core/backend/cpu] Scheduler(forward)" test structure.
    g->forwardDims();
    SequentialScheduler scheduler(g);
    REQUIRE_NOTHROW(scheduler.forward());

    // ReLU with slope 0.0 zeroes every negative element.
    std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int, 3>{0, 2, 0});
    std::shared_ptr<Tensor> result =
        std::static_pointer_cast<Tensor>((*g)["relu1"]->getOperator()->getOutput(0));
    REQUIRE(*result == *expectedOutput);
}
// End-to-end scheduler test: builds small Conv/FC graphs, feeds hand-computed
// integer fixtures and checks every intermediate output against precomputed
// expected tensors.
TEST_CASE("[core/backend/cpu] Scheduler(forward)") {
    // Shared fixtures: a 2-batch, 1-channel 5x5 input holding 0..49, plus the
    // weights and bias of the first 3x3 convolution.
    std::shared_ptr<Tensor> inputTensor =
        std::make_shared<Tensor>(Array4D<int, 2, 1, 5, 5>{{{{{0, 1, 2, 3, 4},
                                                             {5, 6, 7, 8, 9},
                                                             {10, 11, 12, 13, 14},
                                                             {15, 16, 17, 18, 19},
                                                             {20, 21, 22, 23, 24}}},
                                                           {{{25, 26, 27, 28, 29},
                                                             {30, 31, 32, 33, 34},
                                                             {35, 36, 37, 38, 39},
                                                             {40, 41, 42, 43, 44},
                                                             {45, 46, 47, 48, 49}}}}});
    std::shared_ptr<Tensor> weight1 = std::make_shared<Tensor>(
        Array4D<int, 3, 1, 3, 3>{{{{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}},
                                  {{{10, 11, 12}, {13, 14, 15}, {16, 17, 18}}},
                                  {{{19, 20, 21}, {22, 23, 24}, {25, 26, 27}}}}});
    std::shared_ptr<Tensor> bias1 = std::make_shared<Tensor>(Array1D<int, 3>{{1, 2, 3}});

    SECTION("Test Sequential graph") {
        // Linear chain conv1 -> conv2 -> conv3 -> fc; each node's output is
        // checked individually after a single scheduler pass.
        std::shared_ptr<GraphView> g =
            Sequential({
                Conv(1, 3, {3, 3}, "conv1"),
                Conv(3, 4, {1, 1}, "conv2"),
                Conv(4, 3, {1, 1}, "conv3"),
                FC(5, false, "fc")});
        g->setDatatype(Aidge::DataType::Int32);
        g->setBackend("cpu");
        (*g)["conv1"]->getOperator()->setInput(0, inputTensor);
        (*g)["conv1"]->getOperator()->setInput(1, weight1);
        (*g)["conv1"]->getOperator()->setInput(2, bias1);
        std::shared_ptr<Tensor> weight2 =
            std::make_shared<Tensor>(Array4D<int, 4, 3, 1, 1>{{{{{1}}, {{2}}, {{3}}},
                                                               {{{4}}, {{5}}, {{6}}},
                                                               {{{7}}, {{8}}, {{9}}},
                                                               {{{10}}, {{11}}, {{12}}}}});
        std::shared_ptr<Tensor> bias2 = std::make_shared<Tensor>(Array1D<int, 4>{{1, 2, 3, 4}});
        (*g)["conv2"]->getOperator()->setInput(1, weight2);
        (*g)["conv2"]->getOperator()->setInput(2, bias2);
        std::shared_ptr<Tensor> weight3 = std::make_shared<Tensor>(
            Array4D<int, 3, 4, 1, 1>{{{{{1}}, {{2}}, {{3}}, {{4}}},
                                      {{{5}}, {{6}}, {{7}}, {{8}}},
                                      {{{9}}, {{10}}, {{11}}, {{12}}}}});
        std::shared_ptr<Tensor> bias3 = std::make_shared<Tensor>(Array1D<int, 3>{{1, 2, 3}});
        (*g)["conv3"]->getOperator()->setInput(1, weight3);
        (*g)["conv3"]->getOperator()->setInput(2, bias3);
        // FC input size 27 = 3 channels * 3 * 3 spatial from conv3's output.
        std::shared_ptr<Tensor> weightfc = std::make_shared<Tensor>(
            Array2D<int, 5, 27>{{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                  15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
                                 {13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                                  12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9},
                                 {10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8,
                                  9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6},
                                 {7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5,
                                  6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3},
                                 {4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2,
                                  3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}});
        std::shared_ptr<Tensor> biasfc = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}});
        (*g)["fc"]->getOperator()->setInput(1, weightfc);
        (*g)["fc"]->getOperator()->setInput(2, biasfc);
        // input->addChild(g);
        // Propagate tensor dimensions through the graph, then execute it.
        g->forwardDims();
        SequentialScheduler scheduler(g);
        REQUIRE_NOTHROW(scheduler.forward());
        scheduler.saveSchedulingDiagram("schedulingSequential");
        // Expected intermediate outputs, computed by hand for the fixtures
        // above (one tensor per node in the chain).
        std::shared_ptr<Tensor> expectedOutput1 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{
            {{{{367, 412, 457}, {592, 637, 682}, {817, 862, 907}},
              {{854, 980, 1106}, {1484, 1610, 1736}, {2114, 2240, 2366}},
              {{1341, 1548, 1755}, {2376, 2583, 2790}, {3411, 3618, 3825}}},
             {{{1492, 1537, 1582}, {1717, 1762, 1807}, {1942, 1987, 2032}},
              {{4004, 4130, 4256}, {4634, 4760, 4886}, {5264, 5390, 5516}},
              {{6516, 6723, 6930}, {7551, 7758, 7965}, {8586, 8793, 9000}}}}});
        std::shared_ptr<Tensor> expectedOutput2 = std::make_shared<Tensor>(Array4D<int, 2, 4, 3, 3>{
            {{{{6099, 7017, 7935}, {10689, 11607, 12525}, {15279, 16197, 17115}},
              {{13786, 15838, 17890}, {24046, 26098, 28150}, {34306, 36358, 38410}},
              {{21473, 24659, 27845}, {37403, 40589, 43775}, {53333, 56519, 59705}},
              {{29160, 33480, 37800}, {50760, 55080, 59400}, {72360, 76680, 81000}}},
             {{{29049, 29967, 30885}, {33639, 34557, 35475}, {38229, 39147, 40065}},
              {{65086, 67138, 69190}, {75346, 77398, 79450}, {85606, 87658, 89710}},
              {{101123, 104309, 107495}, {117053, 120239, 123425}, {132983, 136169, 139355}},
              {{137160, 141480, 145800}, {158760, 163080, 167400}, {180360, 184680, 189000}}}}});
        std::shared_ptr<Tensor> expectedOutput3 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{
            {{{{214731, 246591, 278451}, {374031, 405891, 437751}, {533331, 565191, 597051}},
              {{496804, 570568, 644332}, {865624, 939388, 1013152}, {1234444, 1308208, 1381972}},
              {{778877, 894545, 1010213}, {1357217, 1472885, 1588553}, {1935557, 2051225, 2166893}}},
             {{{1011231, 1043091, 1074951}, {1170531, 1202391, 1234251}, {1329831, 1361691, 1393551}},
              {{2340904, 2414668, 2488432}, {2709724, 2783488, 2857252}, {3078544, 3152308, 3226072}},
              {{3670577, 3786245, 3901913}, {4248917, 4364585, 4480253}, {4827257, 4942925, 5058593}}}}});
        std::shared_ptr<Tensor> expectedOutput4 = std::make_shared<Tensor>(
            Array2D<int, 2, 5>{{{205050376, 198925904, 181355097, 196978090, 238868348},
                                {598467376, 561797804, 560823897, 593043790, 698672948}}});
        // Compare each node's produced output against the expected tensor.
        std::shared_ptr<Tensor> other1 =
            std::static_pointer_cast<Tensor>((*g)["conv1"]->getOperator()->getOutput(0));
        bool equal1 = (*other1 == *expectedOutput1);
        REQUIRE(equal1);
        std::shared_ptr<Tensor> other2 =
            std::static_pointer_cast<Tensor>((*g)["conv2"]->getOperator()->getOutput(0));
        bool equal2 = (*other2 == *expectedOutput2);
        REQUIRE(equal2);
        std::shared_ptr<Tensor> other3 =
            std::static_pointer_cast<Tensor>((*g)["conv3"]->getOperator()->getOutput(0));
        bool equal3 = (*other3 == *expectedOutput3);
        REQUIRE(equal3);
        std::shared_ptr<Tensor> other4 =
            std::static_pointer_cast<Tensor>((*g)["fc"]->getOperator()->getOutput(0));
        bool equal4 = (*other4 == *expectedOutput4);
        REQUIRE(equal4);
    }

    SECTION("Test Parallel graph") {
        // Diamond topology: one input conv fans out to three parallel 1x1
        // convs whose outputs are summed by Add, then conv2 -> fc. Only the
        // final output is checked here.
        std::shared_ptr<GraphView> g =
            Sequential({Conv(1, 3, {3, 3}, "inputConv"),
                        Parallel({
                            Conv(3, 3, {1, 1}, "conv1.1"),
                            Conv(3, 3, {1, 1}, "conv1.2"),
                            Conv(3, 3, {1, 1}, "conv1.3")}),
                        Add<3>("add1"),
                        Conv(3, 2, {1, 1}, "conv2"),
                        FC(5, false, "out")});
        g->setBackend("cpu");
        g->setDatatype(Aidge::DataType::Int32);
        (*g)["inputConv"]->getOperator()->setInput(0, inputTensor);
        (*g)["inputConv"]->getOperator()->setInput(1, weight1);
        (*g)["inputConv"]->getOperator()->setInput(2, bias1);
        // The three parallel branches share bias1 but use distinct weights.
        std::shared_ptr<Tensor> conv11Weight = std::make_shared<Tensor>(Array4D<int, 3, 3, 1, 1>{
            {{{{1}}, {{2}}, {{3}}}, {{{4}}, {{5}}, {{6}}}, {{{7}}, {{8}}, {{9}}}}});
        (*g)["conv1.1"]->getOperator()->setInput(1, conv11Weight);
        (*g)["conv1.1"]->getOperator()->setInput(2, bias1);
        std::shared_ptr<Tensor> conv12Weight = std::make_shared<Tensor>(Array4D<int, 3, 3, 1, 1>{
            {{{{11}}, {{12}}, {{13}}}, {{{14}}, {{15}}, {{16}}}, {{{17}}, {{18}}, {{19}}}}});
        (*g)["conv1.2"]->getOperator()->setInput(1, conv12Weight);
        (*g)["conv1.2"]->getOperator()->setInput(2, bias1);
        std::shared_ptr<Tensor> conv13Weight = std::make_shared<Tensor>(Array4D<int, 3, 3, 1, 1>{
            {{{{21}}, {{22}}, {{23}}}, {{{24}}, {{25}}, {{26}}}, {{{27}}, {{28}}, {{29}}}}});
        (*g)["conv1.3"]->getOperator()->setInput(1, conv13Weight);
        (*g)["conv1.3"]->getOperator()->setInput(2, bias1);
        std::shared_ptr<Tensor> conv2Weight = std::make_shared<Tensor>(
            Array4D<int, 2, 3, 1, 1>{{{{{1}}, {{2}}, {{3}}}, {{{4}}, {{5}}, {{6}}}}});
        std::shared_ptr<Tensor> bias2 = std::make_shared<Tensor>(Array1D<int, 2>{{1, 2}});
        (*g)["conv2"]->getOperator()->setInput(1, conv2Weight);
        (*g)["conv2"]->getOperator()->setInput(2, bias2);
        // FC input size 18 = 2 channels * 3 * 3 spatial from conv2's output.
        std::shared_ptr<Tensor> fcWeight = std::make_shared<Tensor>(
            Array2D<int, 5, 18>{{{1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3},
                                 {4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1},
                                 {2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4},
                                 {5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2},
                                 {3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}}});
        std::shared_ptr<Tensor> fcBias = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}});
        (*g)["out"]->getOperator()->setInput(1, fcWeight);
        (*g)["out"]->getOperator()->setInput(2, fcBias);
        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(
            Array2D<int, 2, 5>{{{124324368, 130692907, 133325056, 125044620, 142843879},
                                {369195468, 394615207, 382643056, 379441320, 416291779}}});
        g->forwardDims();
        SequentialScheduler scheduler(g);
        REQUIRE_NOTHROW(scheduler.forward());
        scheduler.saveSchedulingDiagram("schedulingSequential");
        std::shared_ptr<Tensor> result =
            std::static_pointer_cast<Tensor>((*g)["out"]->getOperator()->getOutput(0));
        bool equal = (*result == *expectedOutput);
        REQUIRE(equal);
    }

    SECTION("Test Residual graph") {
        // /!\ TODO: To be completed when behaviour is confirmed
        // std::shared_ptr<GraphView> g = Residual({
        //     Conv(3, 4, {1,1}, "conv1"),
        //     Conv(4, 3, {1,1}, "conv2"),
        //     Add<2>("add")
        // });
        // g->setDatatype(Aidge::DataType::Int32);
        // g->setBackend("cpu");
        // std::shared_ptr<Tensor> inputTensor = std::make_shared<Tensor>(Array4D<int,5,5,1,2> {
        //     {
        //         {
        //             {{ 0, 25}},
        //             {{ 1, 26}},
        //             {{ 2, 27}},
        //             {{ 3, 28}},
        //             {{ 4, 29}}
        //         },
        //         {
        //             {{ 5, 30}},
        //             {{ 6, 31}},
        //             {{ 7, 32}},
        //             {{ 8, 33}},
        //             {{ 9, 34}}
        //         },
        //         {
        //             {{10, 35}},
        //             {{11, 36}},
        //             {{12, 37}},
        //             {{13, 38}},
        //             {{14, 39}}
        //         },
        //         {
        //             {{15, 40}},
        //             {{16, 41}},
        //             {{17, 42}},
        //             {{18, 43}},
        //             {{19, 44}}
        //         },
        //         {
        //             {{20, 45}},
        //             {{21, 46}},
        //             {{22, 47}},
        //             {{23, 48}},
        //             {{24, 49}}
        //         }
        //     }});
        // std::shared_ptr<Tensor> weight1 = std::make_shared<Tensor>(Array4D<int,1,1,3,4> {
        //     {{{
        //         { 1, 4, 7, 10},
        //         { 2, 5, 8, 11},
        //         { 3, 6, 9, 12}
        //     }}}});
        // std::shared_ptr<Tensor> bias1 = std::make_shared<Tensor>(Array1D<int,4> {
        //     {1,2,3,4}
        // });
        // (*g)["conv1"]->getOperator()->setInput(1,weight1);
        // (*g)["conv1"]->getOperator()->setInput(2,bias1);
        // std::shared_ptr<Tensor> weight2 = std::make_shared<Tensor>(Array4D<int,1,1,4,3> {
        //     {{{
        //         { 1, 5, 9},
        //         { 2, 6, 10},
        //         { 3, 7, 11},
        //         { 4, 8, 12}
        //     }}}});
        // std::shared_ptr<Tensor> bias2 = std::make_shared<Tensor>(Array1D<int,3> {
        //     {1,2,3}
        // });
        // (*g)["conv2"]->getOperator()->setInput(1,weight2);
        // (*g)["conv2"]->getOperator()->setInput(2,bias2);
        // g->forwardDims();
        // SequentialScheduler scheduler(g);
        // REQUIRE_NOTHROW(scheduler.forward());
        // scheduler.saveSchedulingDiagram("schedulingSequential");
    }

    // Placeholder: recurrent-graph scheduling is not implemented/tested yet.
    SECTION("Test Recurrent graph") {}
}
\ No newline at end of file
#include <array>
#include <catch2/catch_test_macros.hpp>
#include "data/Tensor.hpp"
#include "data/TensorImpl.hpp"
using namespace Aidge;
// Construction of a Tensor from a compile-time nested array, plus checks of
// its basic observers (dims/size), raw storage access, element access via
// get(), printing, and (in)equality across datatypes.
TEST_CASE("Tensor creation") {
    SECTION("from const array") {
        // A 2x2x2 int tensor, an identical twin, and a float-typed
        // counterpart holding the same values (used by the equality checks).
        Tensor tensor = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
        Tensor sameValues = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
        Tensor sameValuesFloat =
            Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};

        SECTION("Tensor features") {
            // Rank, per-axis extents, and total element count of a 2x2x2 tensor.
            REQUIRE(tensor.nbDims() == 3);
            for (std::size_t axis = 0; axis < 3; ++axis) {
                REQUIRE(tensor.dims()[axis] == 2);
            }
            REQUIRE(tensor.size() == 8);
        }

        SECTION("Access to array") {
            // The backing buffer is contiguous: first and last elements are
            // reachable through the raw pointer.
            int *raw = static_cast<int *>(tensor.getImpl()->rawPtr());
            REQUIRE(raw[0] == 1);
            REQUIRE(raw[7] == 8);
        }

        SECTION("get function") {
            using Coords = std::array<std::size_t, 3>;
            REQUIRE(tensor.get<int>(Coords{0, 0, 0}) == 1);
            REQUIRE(tensor.get<int>(Coords{0, 0, 1}) == 2);
            REQUIRE(tensor.get<int>(Coords{0, 1, 1}) == 4);
            REQUIRE(tensor.get<int>(Coords{1, 1, 0}) == 7);
            // get() yields a mutable reference, so a write must stick.
            tensor.get<int>(Coords{1, 1, 1}) = 36;
            REQUIRE(tensor.get<int>(Coords{1, 1, 1}) == 36);
        }

        SECTION("Pretty printing for debug") { REQUIRE_NOTHROW(tensor.print()); }

        SECTION("Tensor (in)equality") {
            // Equal values + equal datatype compare equal; a float tensor
            // with the same values does not.
            REQUIRE(tensor == sameValues);
            REQUIRE_FALSE(tensor == sameValuesFloat);
        }
    }
}
#include <catch2/catch_test_macros.hpp>
#include "data/Tensor.hpp"
#include "data/TensorImpl.hpp"
#include "operator/Add.hpp"
using namespace Aidge;
// Forward pass of the CPU Add kernel with 1, 2 and 3 identical inputs:
// with n copies of the same tensor the expected output is n * input1.
TEST_CASE("Add forward") {
    // Common 3x3x3x2 integer input reused by every section.
    std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
        {
            {
                {{20, 47},{21, 48},{22, 49}},
                {{23, 50},{24, 51},{25, 52}},
                {{26, 53},{27, 54},{28, 55}}
            },
            {
                {{29, 56},{30, 57},{31, 58}},
                {{32, 59},{33, 60},{34, 61}},
                {{35, 62},{36, 63},{37, 64}}
            },
            {
                {{38, 65},{39, 66},{40, 67}},
                {{41, 68},{42, 69},{43, 70}},
                {{44, 71},{45, 72},{46, 73}}
            }
        }
    });
    SECTION("One input") {
        // Add<1> must behave as the identity on its single input.
        std::shared_ptr<Node> myAdd = Add<1>();
        // Fixed: set the datatype before the backend, consistently with the
        // other sections and test files in this commit.
        myAdd->getOperator()->setDatatype(DataType::Int32);
        myAdd->getOperator()->setBackend("cpu");
        myAdd->getOperator()->setInput(0, input1);
        myAdd->getOperator()->computeOutputDims();
        myAdd->forward();
        REQUIRE(*std::static_pointer_cast<Tensor>(myAdd->getOperator()->getOutput(0)) == *input1);
    }
    SECTION("Two inputs") {
        // Expected output is 2 * input1, element-wise.
        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
            {
                {
                    {{40, 94},{42, 96},{44, 98}},
                    {{46, 100},{48, 102},{50, 104}},
                    {{52, 106},{54, 108},{56, 110}}
                },
                {
                    {{58, 112},{60, 114},{62, 116}},
                    {{64, 118},{66, 120},{68, 122}},
                    {{70, 124},{72, 126},{74, 128}}
                },
                {
                    {{76, 130},{78, 132},{80, 134}},
                    {{82, 136},{84, 138},{86, 140}},
                    {{88, 142},{90, 144},{92, 146}}
                }
            }
        });
        std::shared_ptr<Node> myAdd = Add<2>();
        myAdd->getOperator()->setDatatype(DataType::Int32);
        myAdd->getOperator()->setBackend("cpu");
        myAdd->getOperator()->setInput(0, input1);
        myAdd->getOperator()->setInput(1, input1);
        myAdd->getOperator()->computeOutputDims();
        myAdd->forward();
        REQUIRE(*std::static_pointer_cast<Tensor>(myAdd->getOperator()->getOutput(0)) == *expectedOutput);
    }
    SECTION("Three inputs") {
        // Expected output is 3 * input1, element-wise.
        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
            {
                {
                    {{ 60, 141},{ 63, 144},{ 66, 147}},
                    {{ 69, 150},{ 72, 153},{ 75, 156}},
                    {{ 78, 159},{ 81, 162},{ 84, 165}}
                },
                {
                    {{ 87, 168},{ 90, 171},{ 93, 174}},
                    {{ 96, 177},{ 99, 180},{102, 183}},
                    {{105, 186},{108, 189},{111, 192}}
                },
                {
                    {{114, 195},{117, 198},{120, 201}},
                    {{123, 204},{126, 207},{129, 210}},
                    {{132, 213},{135, 216},{138, 219}}
                }
            }
        });
        std::shared_ptr<Node> myAdd = Add<3>();
        myAdd->getOperator()->setDatatype(DataType::Int32);
        myAdd->getOperator()->setBackend("cpu");
        myAdd->getOperator()->setInput(0, input1);
        myAdd->getOperator()->setInput(1, input1);
        myAdd->getOperator()->setInput(2, input1);
        myAdd->getOperator()->computeOutputDims();
        myAdd->forward();
        REQUIRE(*std::static_pointer_cast<Tensor>(myAdd->getOperator()->getOutput(0)) == *expectedOutput);
    }
}
\ No newline at end of file
#include <catch2/catch_test_macros.hpp>
#include <memory>
#include "data/Tensor.hpp"
#include "data/TensorImpl.hpp"
#include "operator/Conv.hpp"
using namespace Aidge;
// Forward pass of the CPU Conv kernel: a 3->4 channel 3x3 convolution over a
// 2-batch 5x5 input, checked against a hand-computed expected output.
TEST_CASE("Conv forward") {
    std::shared_ptr<Node> myConv = Conv(3,4,{3,3}, "myconv");
    myConv->getOperator()->setDatatype(DataType::Int32);
    myConv->getOperator()->setBackend("cpu");
    // Weights: 4 output channels x 3 input channels x 3x3 kernel, filled with
    // the running sequence 0..107.
    std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
        {
            {
                {{  0,   1,   2},
                 {  3,   4,   5},
                 {  6,   7,   8}},
                {{  9,  10,  11},
                 { 12,  13,  14},
                 { 15,  16,  17}},
                {{ 18,  19,  20},
                 { 21,  22,  23},
                 { 24,  25,  26}}
            },
            {
                {{ 27,  28,  29},
                 { 30,  31,  32},
                 { 33,  34,  35}},
                {{ 36,  37,  38},
                 { 39,  40,  41},
                 { 42,  43,  44}},
                {{ 45,  46,  47},
                 { 48,  49,  50},
                 { 51,  52,  53}}
            },
            {
                {{ 54,  55,  56},
                 { 57,  58,  59},
                 { 60,  61,  62}},
                {{ 63,  64,  65},
                 { 66,  67,  68},
                 { 69,  70,  71}},
                {{ 72,  73,  74},
                 { 75,  76,  77},
                 { 78,  79,  80}}
            },
            {
                {{ 81,  82,  83},
                 { 84,  85,  86},
                 { 87,  88,  89}},
                {{ 90,  91,  92},
                 { 93,  94,  95},
                 { 96,  97,  98}},
                {{ 99, 100, 101},
                 {102, 103, 104},
                 {105, 106, 107}}
            }
        }
    });
    // One bias value per output channel.
    std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}});
    // Input: 2 batches x 3 channels x 5x5, filled with 0..149.
    std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> {
        {
            {
                {{  0,   1,   2,   3,   4},
                 {  5,   6,   7,   8,   9},
                 { 10,  11,  12,  13,  14},
                 { 15,  16,  17,  18,  19},
                 { 20,  21,  22,  23,  24}},
                {{ 25,  26,  27,  28,  29},
                 { 30,  31,  32,  33,  34},
                 { 35,  36,  37,  38,  39},
                 { 40,  41,  42,  43,  44},
                 { 45,  46,  47,  48,  49}},
                {{ 50,  51,  52,  53,  54},
                 { 55,  56,  57,  58,  59},
                 { 60,  61,  62,  63,  64},
                 { 65,  66,  67,  68,  69},
                 { 70,  71,  72,  73,  74}}
            },
            {
                {{ 75,  76,  77,  78,  79},
                 { 80,  81,  82,  83,  84},
                 { 85,  86,  87,  88,  89},
                 { 90,  91,  92,  93,  94},
                 { 95,  96,  97,  98,  99}},
                {{100, 101, 102, 103, 104},
                 {105, 106, 107, 108, 109},
                 {110, 111, 112, 113, 114},
                 {115, 116, 117, 118, 119},
                 {120, 121, 122, 123, 124}},
                {{125, 126, 127, 128, 129},
                 {130, 131, 132, 133, 134},
                 {135, 136, 137, 138, 139},
                 {140, 141, 142, 143, 144},
                 {145, 146, 147, 148, 149}}
            }
        }
    });
    // Hand-computed expected output: 2 batches x 4 channels x 3x3 (no padding,
    // stride 1, so spatial size shrinks from 5 to 3).
    std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> {
        {
            {
                {{ 15226,  15577,  15928},
                 { 16981,  17332,  17683},
                 { 18736,  19087,  19438}},
                {{ 37818,  38898,  39978},
                 { 43218,  44298,  45378},
                 { 48618,  49698,  50778}},
                {{ 60426,  62235,  64044},
                 { 69471,  71280,  73089},
                 { 78516,  80325,  82134}},
                {{ 83016,  85554,  88092},
                 { 95706,  98244, 100782},
                 {108396, 110934, 113472}}
            },
            {
                {{ 41551,  41902,  42253},
                 { 43306,  43657,  44008},
                 { 45061,  45412,  45763}},
                {{118818, 119898, 120978},
                 {124218, 125298, 126378},
                 {129618, 130698, 131778}},
                {{196101, 197910, 199719},
                 {205146, 206955, 208764},
                 {214191, 216000, 217809}},
                {{273366, 275904, 278442},
                 {286056, 288594, 291132},
                 {298746, 301284, 303822}}
            }
        }
    });
    myConv->getOperator()->setInput(0,myInput);
    myConv->getOperator()->setInput(1,myWeights);
    myConv->getOperator()->setInput(2,myBias);
    myConv->getOperator()->computeOutputDims();
    myConv->forward();
    // Fixed: removed a leftover debug print of the output tensor (and a stale
    // commented-out std::cout line) that cluttered the test log.
    REQUIRE(*std::static_pointer_cast<Tensor>(myConv->getOperator()->getOutput(0)) == *myOutput);
}
\ No newline at end of file
#include <catch2/catch_test_macros.hpp>
#include <memory>
#include "data/TensorImpl.hpp"
#include "data/Tensor.hpp"
#include "operator/FC.hpp"
using namespace Aidge;
// Forward pass of the CPU FC kernel with 2D and 4D inputs: a 4D input must be
// flattened (75 = 3*5*5 features per batch) and yield the same result as the
// equivalent 2D input.
// Fixed: corrected the "oeprator" typo in the test tag so name-based test
// filtering (e.g. `tests_cpu "[module/backend/cpu/operator]*"`) works.
TEST_CASE("[module/backend/cpu/operator] FC(forward)") {
    // 5 output neurons x 75 input features; every row repeats the 1..15 cycle.
    std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array2D<int, 5, 75>{
        {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4,
          5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8,
          9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
          13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
         {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4,
          5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8,
          9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
          13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
         {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4,
          5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8,
          9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
          13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
         {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4,
          5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8,
          9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
          13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
         {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4,
          5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8,
          9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
          13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}}});
    std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int, 5>{{1, 2, 3, 4, 5}});
    // Identical weight rows mean the five outputs differ only by their bias.
    std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<int, 2, 5>{
        {{23601, 23602, 23603, 23604, 23605}, {68601, 68602, 68603, 68604, 68605}}});
    std::shared_ptr<Node> myFC = FC(5, false, "myfc");
    myFC->getOperator()->setDatatype(DataType::Int32);
    myFC->getOperator()->setBackend("cpu");
    myFC->getOperator()->setInput(1, myWeights);
    myFC->getOperator()->setInput(2, myBias);
    SECTION("2D input") {
        // Batch of 2, 75 features each (values 0..149).
        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<int, 2, 75>{
            {{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
              19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
              38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
              57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74},
             {75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
              90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
              105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
              120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
              135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149}}});
        myFC->getOperator()->setInput(0, myInput);
        myFC->getOperator()->computeOutputDims();
        myFC->forward();
        REQUIRE(*std::static_pointer_cast<Tensor>(myFC->getOperator()->getOutput(0)) == *myOutput);
    }
    SECTION("4D input") {
        // Same 150 values shaped 2x3x5x5: FC must flatten per batch and
        // produce exactly the same output as the 2D case.
        std::shared_ptr<Tensor> myInput =
            std::make_shared<Tensor>(Array4D<int, 2, 3, 5, 5>{{{{{0, 1, 2, 3, 4},
                                                                 {5, 6, 7, 8, 9},
                                                                 {10, 11, 12, 13, 14},
                                                                 {15, 16, 17, 18, 19},
                                                                 {20, 21, 22, 23, 24}},
                                                                {{25, 26, 27, 28, 29},
                                                                 {30, 31, 32, 33, 34},
                                                                 {35, 36, 37, 38, 39},
                                                                 {40, 41, 42, 43, 44},
                                                                 {45, 46, 47, 48, 49}},
                                                                {{50, 51, 52, 53, 54},
                                                                 {55, 56, 57, 58, 59},
                                                                 {60, 61, 62, 63, 64},
                                                                 {65, 66, 67, 68, 69},
                                                                 {70, 71, 72, 73, 74}}},
                                                               {{{75, 76, 77, 78, 79},
                                                                 {80, 81, 82, 83, 84},
                                                                 {85, 86, 87, 88, 89},
                                                                 {90, 91, 92, 93, 94},
                                                                 {95, 96, 97, 98, 99}},
                                                                {{100, 101, 102, 103, 104},
                                                                 {105, 106, 107, 108, 109},
                                                                 {110, 111, 112, 113, 114},
                                                                 {115, 116, 117, 118, 119},
                                                                 {120, 121, 122, 123, 124}},
                                                                {{125, 126, 127, 128, 129},
                                                                 {130, 131, 132, 133, 134},
                                                                 {135, 136, 137, 138, 139},
                                                                 {140, 141, 142, 143, 144},
                                                                 {145, 146, 147, 148, 149}}}}});
        myFC->getOperator()->setInput(0, myInput);
        myFC->getOperator()->computeOutputDims();
        myFC->forward();
        REQUIRE(*std::static_pointer_cast<Tensor>(myFC->getOperator()->getOutput(0)) == *myOutput);
    }
}
\ No newline at end of file
#include <catch2/catch_test_macros.hpp>
#include "data/Tensor.hpp"
#include "data/TensorImpl.hpp"
#include "operator/ReLU.hpp"
using namespace Aidge;
// Forward pass of the CPU ReLU kernel across tensor ranks 1-4 (same values,
// progressively nested), plus the leaky variant with a non-zero slope.
TEST_CASE("ReLU forward") {
    SECTION("1D Tensor") {
        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
            {0, 1, 2,-3, 4,-5,-6, 7, 8, 9}
        });
        // Negative entries are clamped to zero, the rest pass through.
        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,10> {
            {0, 1, 2, 0, 4, 0, 0, 7, 8, 9}
        });
        std::shared_ptr<Node> myReLU = ReLU();
        myReLU->getOperator()->setDatatype(DataType::Int32);
        myReLU->getOperator()->setBackend("cpu");
        myReLU->getOperator()->setInput(0,input0);
        myReLU->getOperator()->computeOutputDims();
        myReLU->forward();
        REQUIRE(*std::static_pointer_cast<Tensor>(myReLU->getOperator()->getOutput(0)) == *expectedOutput);
    }
    SECTION("2D Tensor") {
        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array2D<int,2,10> {
            {
                { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
                {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
            }
        });
        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<int,2,10> {
            {
                { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
                { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
            }
        });
        std::shared_ptr<Node> myReLU = ReLU();
        myReLU->getOperator()->setDatatype(DataType::Int32);
        myReLU->getOperator()->setBackend("cpu");
        myReLU->getOperator()->setInput(0,input0);
        myReLU->getOperator()->computeOutputDims();
        myReLU->forward();
        REQUIRE(*std::static_pointer_cast<Tensor>(myReLU->getOperator()->getOutput(0)) == *expectedOutput);
    }
    SECTION("3D Tensor") {
        // Same 2x10 pattern duplicated along a leading axis.
        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array3D<int,2,2,10> {
            {
                {
                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
                },
                {
                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
                }
            }
        });
        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,2,2,10> {
            {
                {
                    { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
                    { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
                },
                {
                    { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
                    { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
                }
            }
        });
        std::shared_ptr<Node> myReLU = ReLU();
        myReLU->getOperator()->setDatatype(DataType::Int32);
        myReLU->getOperator()->setBackend("cpu");
        myReLU->getOperator()->setInput(0,input0);
        myReLU->getOperator()->computeOutputDims();
        myReLU->forward();
        REQUIRE(*std::static_pointer_cast<Tensor>(myReLU->getOperator()->getOutput(0)) == *expectedOutput);
    }
    SECTION("4D Tensor") {
        // Same pattern duplicated once more along an additional leading axis.
        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
            {
                {
                    {
                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
                    },
                    {
                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
                    }
                },
                {
                    {
                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
                    },
                    {
                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
                    }
                }
            }
        });
        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
            {
                {
                    {
                        { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
                        { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
                    },
                    {
                        { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
                        { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
                    }
                },
                {
                    {
                        { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
                        { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
                    },
                    {
                        { 0, 1, 2, 0, 4, 0, 0, 7, 8, 9},
                        { 0, 4, 2, 0, 4, 0, 0, 7, 0,10}
                    }
                }
            }
        });
        std::shared_ptr<Node> myReLU = ReLU();
        myReLU->getOperator()->setDatatype(DataType::Int32);
        myReLU->getOperator()->setBackend("cpu");
        myReLU->getOperator()->setInput(0,input0);
        myReLU->getOperator()->computeOutputDims();
        myReLU->forward();
        REQUIRE(*std::static_pointer_cast<Tensor>(myReLU->getOperator()->getOutput(0)) == *expectedOutput);
    }
    SECTION("Test leaky negative slope parameter: alpha") {
        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<float,10> {
            {0.0f, 1.0f, 2.0f,-3.0f, 4.0f,-5.0f,-6.0f, 7.0f, 8.0f, 9.0f}
        });
        // With alpha = 0.5 negative values are scaled by 0.5 instead of being
        // clamped to zero (leaky ReLU).
        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<float,10> {
            {0.0f, 1.0f, 2.0f,-1.5f, 4.0f,-2.5f,-3.0f, 7.0f, 8.0f, 9.0f}
        });
        std::shared_ptr<Node> myReLU = ReLU(0.5f);
        myReLU->getOperator()->setDatatype(DataType::Float32);
        myReLU->getOperator()->setBackend("cpu");
        myReLU->getOperator()->setInput(0,input0);
        myReLU->getOperator()->computeOutputDims();
        myReLU->forward();
        REQUIRE(*std::static_pointer_cast<Tensor>(myReLU->getOperator()->getOutput(0)) == *expectedOutput);
    }
}
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment