// Test_ConvImpl.cpp — forward-pass unit tests for the CPU Conv operator.
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cstdlib>
#include <memory>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/backend/cpu.hpp"
using namespace Aidge;
// Forward-pass tests for the CPU Conv kernel. Expected outputs are hard-coded
// literals; presumably generated offline with a reference implementation —
// TODO confirm provenance before regenerating any of these tables.
TEST_CASE("[cpu/operator] Conv(forward)", "[Conv][CPU]") {
// Integer 3x3 convolution, default stride/padding:
// input NCHW {2,3,5,5} -> output {2,4,3,3}, exact (operator==) comparison.
SECTION("Classic Conv") {
// Conv(in_channels=3, out_channels=4, kernel={3,3}).
std::shared_ptr<Node> myConv = Conv(3,4,{3,3}, "myconv");
auto op = std::static_pointer_cast<OperatorTensor>(myConv -> getOperator());
// Weights laid out {out_ch, in_ch, kH, kW} = {4,3,3,3}, values 0..107 in
// row-major order so each filter is easy to eyeball.
std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
{
{
{{ 0, 1, 2},
{ 3, 4, 5},
{ 6, 7, 8}},
{{ 9, 10, 11},
{ 12, 13, 14},
{ 15, 16, 17}},
{{ 18, 19, 20},
{ 21, 22, 23},
{ 24, 25, 26}}
},
{
{{ 27, 28, 29},
{ 30, 31, 32},
{ 33, 34, 35}},
{{ 36, 37, 38},
{ 39, 40, 41},
{ 42, 43, 44}},
{{ 45, 46, 47},
{ 48, 49, 50},
{ 51, 52, 53}}
},
{
{{ 54, 55, 56},
{ 57, 58, 59},
{ 60, 61, 62}},
{{ 63, 64, 65},
{ 66, 67, 68},
{ 69, 70, 71}},
{{ 72, 73, 74},
{ 75, 76, 77},
{ 78, 79, 80}}
},
{
{{ 81, 82, 83},
{ 84, 85, 86},
{ 87, 88, 89}},
{{ 90, 91, 92},
{ 93, 94, 95},
{ 96, 97, 98}},
{{ 99, 100, 101},
{102, 103, 104},
{105, 106, 107}}
}
}
});
// One bias per output channel; mixed zero/non-zero values so a missing
// bias-add would be caught on channels 0 and 2.
std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}});
// Input values 0..149 in NCHW order, batch of 2.
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
{
{
{{ 0, 1, 2, 3, 4},
{ 5, 6, 7, 8, 9},
{ 10, 11, 12, 13, 14},
{ 15, 16, 17, 18, 19},
{ 20, 21, 22, 23, 24}},
{{ 25, 26, 27, 28, 29},
{ 30, 31, 32, 33, 34},
{ 35, 36, 37, 38, 39},
{ 40, 41, 42, 43, 44},
{ 45, 46, 47, 48, 49}},
{{ 50, 51, 52, 53, 54},
{ 55, 56, 57, 58, 59},
{ 60, 61, 62, 63, 64},
{ 65, 66, 67, 68, 69},
{ 70, 71, 72, 73, 74}}
},
{
{{ 75, 76, 77, 78, 79},
{ 80, 81, 82, 83, 84},
{ 85, 86, 87, 88, 89},
{ 90, 91, 92, 93, 94},
{ 95, 96, 97, 98, 99}},
{{100, 101, 102, 103, 104},
{105, 106, 107, 108, 109},
{110, 111, 112, 113, 114},
{115, 116, 117, 118, 119},
{120, 121, 122, 123, 124}},
{{125, 126, 127, 128, 129},
{130, 131, 132, 133, 134},
{135, 136, 137, 138, 139},
{140, 141, 142, 143, 144},
{145, 146, 147, 148, 149}}
}
}
});
// Expected result {2,4,3,3}: 5x5 input with a 3x3 kernel and no padding
// yields 3x3 spatial output.
std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> {
{
{
{{ 15226, 15577, 15928},
{ 16981, 17332, 17683},
{ 18736, 19087, 19438}},
{{ 37818, 38898, 39978},
{ 43218, 44298, 45378},
{ 48618, 49698, 50778}},
{{ 60426, 62235, 64044},
{ 69471, 71280, 73089},
{ 78516, 80325, 82134}},
{{ 83016, 85554, 88092},
{ 95706, 98244, 100782},
{108396, 110934, 113472}}
},
{
{{ 41551, 41902, 42253},
{ 43306, 43657, 44008},
{ 45061, 45412, 45763}},
{{118818, 119898, 120978},
{124218, 125298, 126378},
{129618, 130698, 131778}},
{{196101, 197910, 199719},
{205146, 206955, 208764},
{214191, 216000, 217809}},
{{273366, 275904, 278442},
{286056, 288594, 291132},
{298746, 301284, 303822}}
}
}
});
// Conv input slots: 0 = data, 1 = weights, 2 = bias.
op->associateInput(0,myInput);
op->associateInput(1,myWeights);
op->associateInput(2,myBias);
op->setDataType(DataType::Int32);
op->setBackend("cpu");
// Propagate dims to the output tensor before running the kernel.
op->computeOutputDims();
myConv->forward();
// op->getOutput(0)->print();
// Integer arithmetic: exact equality is safe here.
REQUIRE(*(op->getOutput(0)) == *myOutput);
}
// Float 1x1 ("point-wise") convolution: input NCHW {2,3,3,3} -> {2,4,3,3},
// compared element-wise against a hard-coded reference with abs tolerance 1e-5.
SECTION("Point-wise") {
// Conv(in_channels=3, out_channels=4, kernel={1,1}); trailing {1,1} is
// passed positionally — presumably the stride; confirm against Conv's
// factory signature.
std::shared_ptr<Node> myConv = Conv(3,4,{1,1}, "myconv", {1,1});
auto op = std::static_pointer_cast<OperatorTensor>(myConv -> getOperator());
// NOTE(review): this section uses setInput() where the one above uses
// associateInput() — presumably equivalent for this purpose; confirm.
op->setInput(0, std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
{
{
{{-1.38467371F, -0.87123615F, -0.22336592F},
{ 1.71736145F, 0.31888032F, -0.42451897F},
{ 0.30572093F, -0.77459252F, -1.55757248F}},
{{ 0.99563611F, -0.87978584F, -0.60114205F},
{-1.27415121F, 2.12278509F, -1.23465312F},
{-0.48791388F, -0.91382301F, -0.65813726F}},
{{ 0.07802387F, 0.52580875F, -0.48799172F},
{ 1.19136906F, -0.81400764F, -0.73599279F},
{-1.40324783F, 0.03600367F, -0.06347727F}}
},
{
{{ 0.67561489F, -0.09780689F, 1.84459400F},
{-1.18453741F, 1.38354933F, 1.44513381F},
{ 0.85641253F, 2.21807575F, 0.52316552F}},
{{ 0.34664667F, -0.19733144F, 1.14120162F},
{ 0.05164360F, 0.72810954F, -0.71064192F},
{-0.60206831F, 0.96044880F, 0.40481427F}},
{{-1.35434294F, 1.33470297F, 0.48353928F},
{-0.19756168F, 1.26831138F, 1.22426283F},
{ 0.09811721F, 1.74225271F, -1.35267365F}}
}
}
}));
// Weights {out_ch, in_ch, 1, 1}: one scalar per (out, in) channel pair.
op->setInput(1, std::make_shared<Tensor>(Array4D<float,4,3,1,1> {
{
{
{{ 0.33669037F}},
{{ 0.12880941F}},
{{ 0.23446237F}}
},
{
{{ 0.23033303F}},
{{-1.12285638F}},
{{-0.18632829F}}
},
{
{{ 2.20820141F}},
{{-0.63799703F}},
{{ 0.46165723F}}},
{
{{ 0.26735088F}},
{{ 0.53490466F}},
{{ 0.80935723F}}
}
}
}));
op->setInput(2, std::make_shared<Tensor>(Array1D<float,4> {{ 1.11029029F, -1.68979895F, -0.98895991F, 0.95797181F}}));
// 1x1 kernel preserves spatial dims: output is {2,4,3,3}.
Tensor expectedOutput = Array4D<float,2,4,3,3> {
{
{
{{ 0.79062498F, 0.82691115F, 0.84323663F},
{ 1.80371785F, 1.30023468F, 0.63576132F},
{ 0.82136691F, 0.74022496F, 0.48621333F}},
{{-3.14122939F, -1.00057328F, -0.97532475F},
{-0.08553087F, -3.84826040F, -0.26410526F},
{-0.81005937F, -0.84882969F, -1.29773819F}},
{{-4.64579105F, -2.10878062F, -1.32395494F},
{ 4.16622877F, -2.01493120F, -1.47845459F},
{-0.65039843F, -2.09977841F, -4.03780890F}},
{{ 1.18349767F, 0.68001163F, 0.18174142F},
{ 1.69980371F, 1.51988935F, -0.41162649F},
{-0.35700959F, 0.29121545F, 0.13813695F}}
},
{
{{ 1.06487226F, 1.36487913F, 1.99171650F},
{ 0.67179936F, 1.96727657F, 1.79235911F},
{ 1.34408879F, 2.38930249F, 1.02142799F}},
{{-1.67106462F, -1.73944509F, -2.63643050F},
{-1.98381400F, -2.42500663F, -0.78710288F},
{-0.83478457F, -2.58197999F, -1.77180362F}},
{{-0.34346789F, -0.46286502F, 2.57942152F},
{-3.72881150F, 2.18718910F, 3.22076392F},
{ 1.33158576F, 4.10055828F, -0.71644694F}},
{{ 0.22787374F, 1.90652108F, 2.45291567F},
{ 0.50901115F, 2.74385118F, 1.95506990F},
{ 0.94429719F, 3.47482967F, 0.21958135F}}
}
}
};
op->setDataType(DataType::Float32);
op->setBackend("cpu");
op->computeOutputDims();
myConv->forward();
// Compare through the raw buffers element by element with an absolute
// tolerance of 1e-5 (exact equality is unsuitable for float results).
float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
float* expectedPtr = static_cast<float*>(expectedOutput.getImpl()->rawPtr());
for (std::size_t i = 0; i< expectedOutput.size(); ++i) {
REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
}
}
}