Commit da1bf235 authored by Olivier BICHLER

Initial commit with working concept

/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <memory>
#include <string>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/OpArgs.hpp"
#include "aidge/scheduler/SequentialScheduler.hpp"
#include "aidge/recipes/Recipes.hpp"
#include "aidge/backend/arrayfire.hpp"
using namespace Aidge;
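// These tests build a small three-layer convolutional graph whose nodes use
// mixed data types (Float16 / Float32 / Float64), so that Cast and Move
// operations are needed between layers when running on the arrayfire backend.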
TEST_CASE("[arrayfire/castmove] CastMove(forward)") {
std::shared_ptr<Tensor> inputTensor =
std::make_shared<Tensor>(Array4D<int, 2, 1, 5, 5>{{{{{0, 1, 2, 3, 4},
{5, 6, 7, 8, 9},
{10, 11, 12, 13, 14},
{15, 16, 17, 18, 19},
{20, 21, 22, 23, 24}}},
{{{25, 26, 27, 28, 29},
{30, 31, 32, 33, 34},
{35, 36, 37, 38, 39},
{40, 41, 42, 43, 44},
{45, 46, 47, 48, 49}}}}});
std::shared_ptr<Tensor> weight1 = std::make_shared<Tensor>(
Array4D<int, 3, 1, 3, 3>{{{{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}},
{{{10, 11, 12}, {13, 14, 15}, {16, 17, 18}}},
{{{19, 20, 21}, {22, 23, 24}, {25, 26, 27}}}}});
std::shared_ptr<Tensor> bias1 = std::make_shared<Tensor>(Array1D<int, 3>{{1, 2, 3}});
SECTION("Test implicit") {
std::shared_ptr<GraphView> g =
Sequential({
Conv(1, 3, {3, 3}, "conv1"),
Conv(3, 4, {1, 1}, "conv2"),
Conv(4, 3, {1, 1}, "conv3")});
g->getNode("conv1")->getOperator()->setInput(0, inputTensor);
g->getNode("conv1")->getOperator()->setInput(1, weight1);
g->getNode("conv1")->getOperator()->setInput(2, bias1);
std::shared_ptr<Tensor> weight2 =
std::make_shared<Tensor>(Array4D<int, 4, 3, 1, 1>{{{{{1}}, {{2}}, {{3}}},
{{{4}}, {{5}}, {{6}}},
{{{7}}, {{8}}, {{9}}},
{{{10}}, {{11}}, {{12}}}}});
std::shared_ptr<Tensor> bias2 = std::make_shared<Tensor>(Array1D<int, 4>{{1, 2, 3, 4}});
g->getNode("conv2")->getOperator()->setInput(1, weight2);
g->getNode("conv2")->getOperator()->setInput(2, bias2);
std::shared_ptr<Tensor> weight3 = std::make_shared<Tensor>(
Array4D<int, 3, 4, 1, 1>{{{{{1}}, {{2}}, {{3}}, {{4}}},
{{{5}}, {{6}}, {{7}}, {{8}}},
{{{9}}, {{10}}, {{11}}, {{12}}}}});
std::shared_ptr<Tensor> bias3 = std::make_shared<Tensor>(Array1D<int, 3>{{1, 2, 3}});
g->getNode("conv3")->getOperator()->setInput(1, weight3);
g->getNode("conv3")->getOperator()->setInput(2, bias3);
g->setDataType(Aidge::DataType::Float32);
g->getNode("conv1")->getOperator()->setDataType(DataType::Float16);
g->getNode("conv3")->getOperator()->setDataType(DataType::Float64);
g->setBackend("arrayfire");
g->forwardDims();
SequentialScheduler scheduler(g);
REQUIRE_NOTHROW(scheduler.forward());
scheduler.saveSchedulingDiagram("schedulingSequentialImplicit");
std::shared_ptr<Tensor> expectedOutput1 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{
{{{{367, 412, 457}, {592, 637, 682}, {817, 862, 907}},
{{854, 980, 1106}, {1484, 1610, 1736}, {2114, 2240, 2366}},
{{1341, 1548, 1755}, {2376, 2583, 2790}, {3411, 3618, 3825}}},
{{{1492, 1537, 1582}, {1717, 1762, 1807}, {1942, 1987, 2032}},
{{4004, 4130, 4256}, {4634, 4760, 4886}, {5264, 5390, 5516}},
{{6516, 6723, 6930}, {7551, 7758, 7965}, {8586, 8793, 9000}}}}});
std::shared_ptr<Tensor> expectedOutput2 = std::make_shared<Tensor>(Array4D<int, 2, 4, 3, 3>{
{{{{6099, 7017, 7935}, {10689, 11607, 12525}, {15279, 16197, 17115}},
{{13786, 15838, 17890}, {24046, 26098, 28150}, {34306, 36358, 38410}},
{{21473, 24659, 27845}, {37403, 40589, 43775}, {53333, 56519, 59705}},
{{29160, 33480, 37800}, {50760, 55080, 59400}, {72360, 76680, 81000}}},
{{{29049, 29967, 30885}, {33639, 34557, 35475}, {38229, 39147, 40065}},
{{65086, 67138, 69190}, {75346, 77398, 79450}, {85606, 87658, 89710}},
{{101123, 104309, 107495}, {117053, 120239, 123425}, {132983, 136169, 139355}},
{{137160, 141480, 145800}, {158760, 163080, 167400}, {180360, 184680, 189000}}}}});
std::shared_ptr<Tensor> expectedOutput3 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{
{{{{214731, 246591, 278451}, {374031, 405891, 437751}, {533331, 565191, 597051}},
{{496804, 570568, 644332}, {865624, 939388, 1013152}, {1234444, 1308208, 1381972}},
{{778877, 894545, 1010213}, {1357217, 1472885, 1588553}, {1935557, 2051225, 2166893}}},
{{{1011231, 1043091, 1074951}, {1170531, 1202391, 1234251}, {1329831, 1361691, 1393551}},
{{2340904, 2414668, 2488432}, {2709724, 2783488, 2857252}, {3078544, 3152308, 3226072}},
{{3670577, 3786245, 3901913}, {4248917, 4364585, 4480253}, {4827257, 4942925, 5058593}}}}});
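// Copy each convolution output back to a CPU tensor (casting from the
// backend representation) before comparing against the reference values.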
std::shared_ptr<Tensor> other1 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv1")->getOperator())->getOutput(0);
Tensor hostOther1(other1->dataType());
hostOther1.setBackend("cpu");
hostOther1.copyCastFrom(*other1);
REQUIRE(approxEq<half_float::half, int>(hostOther1, *expectedOutput1, 0.001, 0.0));
std::shared_ptr<Tensor> other2 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv2")->getOperator())->getOutput(0);
Tensor hostOther2(other2->dataType());
hostOther2.setBackend("cpu");
hostOther2.copyCastFrom(*other2);
REQUIRE(approxEq<float, int>(hostOther2, *expectedOutput2, 0.001, 0.0));
std::shared_ptr<Tensor> other3 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv3")->getOperator())->getOutput(0);
Tensor hostOther3(other3->dataType());
hostOther3.setBackend("cpu");
hostOther3.copyCastFrom(*other3);
REQUIRE(approxEq<double, int>(hostOther3, *expectedOutput3, 0.001, 0.0));
}
SECTION("Test explicit") {
std::shared_ptr<GraphView> g =
Sequential({
Conv(1, 3, {3, 3}, "conv1"),
Conv(3, 4, {1, 1}, "conv2"),
Conv(4, 3, {1, 1}, "conv3")});
g->getNode("conv1")->getOperator()->setInput(0, inputTensor);
g->getNode("conv1")->getOperator()->setInput(1, weight1);
g->getNode("conv1")->getOperator()->setInput(2, bias1);
std::shared_ptr<Tensor> weight2 =
std::make_shared<Tensor>(Array4D<int, 4, 3, 1, 1>{{{{{1}}, {{2}}, {{3}}},
{{{4}}, {{5}}, {{6}}},
{{{7}}, {{8}}, {{9}}},
{{{10}}, {{11}}, {{12}}}}});
std::shared_ptr<Tensor> bias2 = std::make_shared<Tensor>(Array1D<int, 4>{{1, 2, 3, 4}});
g->getNode("conv2")->getOperator()->setInput(1, weight2);
g->getNode("conv2")->getOperator()->setInput(2, bias2);
std::shared_ptr<Tensor> weight3 = std::make_shared<Tensor>(
Array4D<int, 3, 4, 1, 1>{{{{{1}}, {{2}}, {{3}}, {{4}}},
{{{5}}, {{6}}, {{7}}, {{8}}},
{{{9}}, {{10}}, {{11}}, {{12}}}}});
std::shared_ptr<Tensor> bias3 = std::make_shared<Tensor>(Array1D<int, 3>{{1, 2, 3}});
g->getNode("conv3")->getOperator()->setInput(1, weight3);
g->getNode("conv3")->getOperator()->setInput(2, bias3);
g->setDataType(Aidge::DataType::Float32);
g->getNode("conv1")->getOperator()->setDataType(DataType::Float16);
g->getNode("conv3")->getOperator()->setDataType(DataType::Float64);
explicitCastMove(g);
g->setBackend("arrayfire");
g->forwardDims();
SequentialScheduler scheduler(g);
REQUIRE_NOTHROW(scheduler.forward());
scheduler.saveSchedulingDiagram("schedulingSequentialExplicit");
std::shared_ptr<Tensor> expectedOutput1 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{
{{{{367, 412, 457}, {592, 637, 682}, {817, 862, 907}},
{{854, 980, 1106}, {1484, 1610, 1736}, {2114, 2240, 2366}},
{{1341, 1548, 1755}, {2376, 2583, 2790}, {3411, 3618, 3825}}},
{{{1492, 1537, 1582}, {1717, 1762, 1807}, {1942, 1987, 2032}},
{{4004, 4130, 4256}, {4634, 4760, 4886}, {5264, 5390, 5516}},
{{6516, 6723, 6930}, {7551, 7758, 7965}, {8586, 8793, 9000}}}}});
std::shared_ptr<Tensor> expectedOutput2 = std::make_shared<Tensor>(Array4D<int, 2, 4, 3, 3>{
{{{{6099, 7017, 7935}, {10689, 11607, 12525}, {15279, 16197, 17115}},
{{13786, 15838, 17890}, {24046, 26098, 28150}, {34306, 36358, 38410}},
{{21473, 24659, 27845}, {37403, 40589, 43775}, {53333, 56519, 59705}},
{{29160, 33480, 37800}, {50760, 55080, 59400}, {72360, 76680, 81000}}},
{{{29049, 29967, 30885}, {33639, 34557, 35475}, {38229, 39147, 40065}},
{{65086, 67138, 69190}, {75346, 77398, 79450}, {85606, 87658, 89710}},
{{101123, 104309, 107495}, {117053, 120239, 123425}, {132983, 136169, 139355}},
{{137160, 141480, 145800}, {158760, 163080, 167400}, {180360, 184680, 189000}}}}});
std::shared_ptr<Tensor> expectedOutput3 = std::make_shared<Tensor>(Array4D<int, 2, 3, 3, 3>{
{{{{214731, 246591, 278451}, {374031, 405891, 437751}, {533331, 565191, 597051}},
{{496804, 570568, 644332}, {865624, 939388, 1013152}, {1234444, 1308208, 1381972}},
{{778877, 894545, 1010213}, {1357217, 1472885, 1588553}, {1935557, 2051225, 2166893}}},
{{{1011231, 1043091, 1074951}, {1170531, 1202391, 1234251}, {1329831, 1361691, 1393551}},
{{2340904, 2414668, 2488432}, {2709724, 2783488, 2857252}, {3078544, 3152308, 3226072}},
{{3670577, 3786245, 3901913}, {4248917, 4364585, 4480253}, {4827257, 4942925, 5058593}}}}});
std::shared_ptr<Tensor> other1 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv1")->getOperator())->getOutput(0);
Tensor hostOther1(other1->dataType());
hostOther1.setBackend("cpu");
hostOther1.copyCastFrom(*other1);
REQUIRE(approxEq<half_float::half, int>(hostOther1, *expectedOutput1, 0.001, 0.0));
std::shared_ptr<Tensor> other2 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv2")->getOperator())->getOutput(0);
Tensor hostOther2(other2->dataType());
hostOther2.setBackend("cpu");
hostOther2.copyCastFrom(*other2);
REQUIRE(approxEq<float, int>(hostOther2, *expectedOutput2, 0.001, 0.0));
std::shared_ptr<Tensor> other3 = std::static_pointer_cast<OperatorTensor>(g->getNode("conv3")->getOperator())->getOutput(0);
Tensor hostOther3(other3->dataType());
hostOther3.setBackend("cpu");
hostOther3.copyCastFrom(*other3);
REQUIRE(approxEq<double, int>(hostOther3, *expectedOutput3, 0.001, 0.0));
}
}
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cmath>
#include <memory>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/backend/arrayfire.hpp"
using namespace Aidge;
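// Checks a plain 1x1 convolution forward pass on the arrayfire backend
// against precomputed reference values.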
TEST_CASE("[arrayfire/operator] Conv(forward)", "[Conv][arrayfire]") {
SECTION("Classic Conv") {
std::shared_ptr<Node> myConv = Conv(3, 4, {1, 1}, "myconv", {1, 1});
auto op = std::static_pointer_cast<OperatorTensor>(myConv->getOperator());
op->setInput(0, std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
{
{
{{-1.38467371F, -0.87123615F, -0.22336592F},
{ 1.71736145F, 0.31888032F, -0.42451897F},
{ 0.30572093F, -0.77459252F, -1.55757248F}},
{{ 0.99563611F, -0.87978584F, -0.60114205F},
{-1.27415121F, 2.12278509F, -1.23465312F},
{-0.48791388F, -0.91382301F, -0.65813726F}},
{{ 0.07802387F, 0.52580875F, -0.48799172F},
{ 1.19136906F, -0.81400764F, -0.73599279F},
{-1.40324783F, 0.03600367F, -0.06347727F}}
},
{
{{ 0.67561489F, -0.09780689F, 1.84459400F},
{-1.18453741F, 1.38354933F, 1.44513381F},
{ 0.85641253F, 2.21807575F, 0.52316552F}},
{{ 0.34664667F, -0.19733144F, 1.14120162F},
{ 0.05164360F, 0.72810954F, -0.71064192F},
{-0.60206831F, 0.96044880F, 0.40481427F}},
{{-1.35434294F, 1.33470297F, 0.48353928F},
{-0.19756168F, 1.26831138F, 1.22426283F},
{ 0.09811721F, 1.74225271F, -1.35267365F}}
}
}
}));
op->setInput(1, std::make_shared<Tensor>(Array4D<float,4,3,1,1> {
{
{
{{ 0.33669037F}},
{{ 0.12880941F}},
{{ 0.23446237F}}
},
{
{{ 0.23033303F}},
{{-1.12285638F}},
{{-0.18632829F}}
},
{
{{ 2.20820141F}},
{{-0.63799703F}},
{{ 0.46165723F}}
},
{
{{ 0.26735088F}},
{{ 0.53490466F}},
{{ 0.80935723F}}
}
}
}));
op->setInput(2, std::make_shared<Tensor>(Array1D<float,4> {{ 1.11029029F, -1.68979895F, -0.98895991F, 0.95797181F}}));
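// Expected output values for the convolution above (precomputed reference).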
Tensor expectedOutput = Array4D<float,2,4,3,3> {
{
{
{{ 0.79062498F, 0.82691115F, 0.84323663F},
{ 1.80371785F, 1.30023468F, 0.63576132F},
{ 0.82136691F, 0.74022496F, 0.48621333F}},
{{-3.14122939F, -1.00057328F, -0.97532475F},
{-0.08553087F, -3.84826040F, -0.26410526F},
{-0.81005937F, -0.84882969F, -1.29773819F}},
{{-4.64579105F, -2.10878062F, -1.32395494F},
{ 4.16622877F, -2.01493120F, -1.47845459F},
{-0.65039843F, -2.09977841F, -4.03780890F}},
{{ 1.18349767F, 0.68001163F, 0.18174142F},
{ 1.69980371F, 1.51988935F, -0.41162649F},
{-0.35700959F, 0.29121545F, 0.13813695F}}
},
{
{{ 1.06487226F, 1.36487913F, 1.99171650F},
{ 0.67179936F, 1.96727657F, 1.79235911F},
{ 1.34408879F, 2.38930249F, 1.02142799F}},
{{-1.67106462F, -1.73944509F, -2.63643050F},
{-1.98381400F, -2.42500663F, -0.78710288F},
{-0.83478457F, -2.58197999F, -1.77180362F}},
{{-0.34346789F, -0.46286502F, 2.57942152F},
{-3.72881150F, 2.18718910F, 3.22076392F},
{ 1.33158576F, 4.10055828F, -0.71644694F}},
{{ 0.22787374F, 1.90652108F, 2.45291567F},
{ 0.50901115F, 2.74385118F, 1.95506990F},
{ 0.94429719F, 3.47482967F, 0.21958135F}}
}
}
};
op->setDataType(DataType::Float32);
op->setBackend("arrayfire");
myConv->forward();
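// Compare the raw output buffer element-wise against the expected values,
// within a small absolute tolerance.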
float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
float* expectedPtr = static_cast<float*>(expectedOutput.getImpl()->rawPtr());
for (std::size_t i = 0; i < expectedOutput.size(); ++i) {
REQUIRE(std::abs(resPtr[i] - expectedPtr[i]) < 0.00001);
}
}
}
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include "aidge/data/Tensor.hpp"
#include "aidge/backend/cpu.hpp"
#include "aidge/backend/arrayfire/data/TensorImpl.hpp"
using namespace Aidge;
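// Basic Tensor creation and access tests for the arrayfire TensorImpl:
// construction from const arrays, dimension queries, element access,
// printing and equality.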
TEST_CASE("Tensor creation arrayfire", "[Tensor][arrayfire]") {
SECTION("from const array") {
Tensor x;
x.setDataType(Aidge::DataType::Int32);
x.setBackend("arrayfire");
x = Array3D<int,2,2,2>{
{
{
{1, 2},
{3, 4}
},
{
{5, 6},
{7, 8}
}
}};
Tensor xCopy;
xCopy.setDataType(Aidge::DataType::Int32);
xCopy.setBackend("arrayfire");
xCopy = Array3D<int,2,2,2>{
{
{
{1, 2},
{3, 4}
},
{
{5, 6},
{7, 8}
}
}};
Tensor xFloat;
xFloat.setBackend("arrayfire");
xFloat = Array3D<float,2,2,2>{
{
{
{1., 2.},
{3., 4.}
},
{
{5., 6.},
{7., 8.}
}
}};
SECTION("Tensor features") {
REQUIRE(x.nbDims() == 3);
REQUIRE(x.dims()[0] == 2);
REQUIRE(x.dims()[1] == 2);
REQUIRE(x.dims()[2] == 2);
REQUIRE(x.size() == 8);
}
SECTION("arrayfire tensor features") {
REQUIRE(static_cast<TensorImpl_arrayfire<int>*>(x.getImpl().get())->data().dims(0) == 2);
REQUIRE(static_cast<TensorImpl_arrayfire<int>*>(x.getImpl().get())->data().dims(1) == 2);
REQUIRE(static_cast<TensorImpl_arrayfire<int>*>(x.getImpl().get())->data().dims(2) == 2);
REQUIRE(static_cast<TensorImpl_arrayfire<int>*>(x.getImpl().get())->data().numdims() == 3);
REQUIRE(static_cast<TensorImpl_arrayfire<int>*>(x.getImpl().get())->data().elements() == 8);
REQUIRE(static_cast<TensorImpl_arrayfire<int>*>(x.getImpl().get())->data().bytes() == 8 * sizeof(int32_t));
}
SECTION("Access to array") {
REQUIRE(static_cast<int*>(x.getImpl()->rawPtr())[0] == 1);
REQUIRE(static_cast<int*>(x.getImpl()->rawPtr())[7] == 8);
}
SECTION("get function") {
REQUIRE(x.get<int>({0,0,0}) == 1);
REQUIRE(x.get<int>({0,0,1}) == 2);
REQUIRE(x.get<int>({0,1,1}) == 4);
REQUIRE(x.get<int>({1,1,0}) == 7);
x.set<int>({1, 1, 1}, 36);
REQUIRE(x.get<int>({1,1,1}) == 36);
}
SECTION("Pretty printing for debug") {
REQUIRE_NOTHROW(x.print());
}
SECTION("Tensor (in)equality") {
REQUIRE(x == xCopy);
REQUIRE_FALSE(x == xFloat);
}
}
SECTION("from const array before backend") {
Tensor x = Array3D<int,2,2,2>{
{
{
{1, 2},
{3, 4}
},
{
{5, 6},
{7, 8}
}
}};
x.setBackend("arrayfire");
REQUIRE(x.nbDims() == 3);
REQUIRE(x.dims()[0] == 2);
REQUIRE(x.dims()[1] == 2);
REQUIRE(x.dims()[2] == 2);
REQUIRE(x.size() == 8);
REQUIRE(x.get<int>({0,0,0}) == 1);
REQUIRE(x.get<int>({0,0,1}) == 2);
REQUIRE(x.get<int>({0,1,1}) == 4);
REQUIRE(x.get<int>({1,1,1}) == 8);
}
}