Skip to content
Snippets Groups Projects
Commit 740a27ba authored by Maxence Naud's avatar Maxence Naud
Browse files

Merge branch 'dev' into 'main'

v0.4.0

See merge request !118
parents a2477ff1 ed718fc0
No related branches found
No related tags found
1 merge request!118v0.4.0
Pipeline #61247 passed
......@@ -15,6 +15,7 @@
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include "aidge/backend/cpu.hpp"
......@@ -153,7 +154,7 @@ TEST_CASE("[cpu/operator] Conv(forward)", "[Conv][CPU]") {
op->setDataType(DataType::Int32);
op->setBackend("cpu");
myConv->forward();
// op->getOutput(0)->print();
op->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == *myOutput);
}
SECTION("Point-wise") {
......@@ -251,4 +252,147 @@ TEST_CASE("[cpu/operator] Conv(forward)", "[Conv][CPU]") {
REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
}
}
// Checks a strided AND dilated 2-D convolution on the CPU backend against a
// precomputed reference output.
// Conv(in_channels=3, out_channels=4, kernel={3,3}, name, stride={3,3}, dilation={2,2});
// the weight tensor below is 4x3x3x3 (out, in, kh, kw), consistent with in=3/out=4.
// With dilation 2 the effective kernel extent is 2*(3-1)+1 = 5, so each 8x8
// spatial input yields floor((8-5)/3)+1 = 2 positions per dimension, hence the
// 2x4x2x2 expected output declared below.
SECTION("Strided and dilated Conv") {
std::shared_ptr<Node> myConv = Conv(3,4,{3,3}, "myconv", {3,3},{2,2});
auto op = std::static_pointer_cast<OperatorTensor>(myConv -> getOperator());
// Input activations: batch=2, channels=3, 8x8 spatial (NCHW layout).
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<float,2,3,8,8> {
{{{
{0.0107F, 0.5076F, 0.2293F, 0.0486F, 0.7375F, 0.2637F, 0.9615F, 0.9138F},
{0.0678F, 0.5604F, 0.1940F, 0.0287F, 0.1029F, 0.2059F, 0.5058F, 0.9885F},
{0.9904F, 0.2890F, 0.4606F, 0.1055F, 0.9028F, 0.1654F, 0.6499F, 0.4775F},
{0.9499F, 0.4695F, 0.1713F, 0.0731F, 0.4913F, 0.8921F, 0.1782F, 0.1111F},
{0.2479F, 0.4669F, 0.1078F, 0.6153F, 0.0299F, 0.6484F, 0.2397F, 0.1814F},
{0.3779F, 0.9032F, 0.5651F, 0.3896F, 0.8439F, 0.6404F, 0.3813F, 0.0841F},
{0.5566F, 0.8950F, 0.1226F, 0.8881F, 0.9870F, 0.6256F, 0.6387F, 0.0628F},
{0.2857F, 0.0579F, 0.6247F, 0.1286F, 0.0951F, 0.1268F, 0.9510F, 0.3789F}},
{{0.7648F, 0.5340F, 0.1024F, 0.4098F, 0.9958F, 0.7941F, 0.1190F, 0.7328F},
{0.4532F, 0.6598F, 0.9146F, 0.1690F, 0.6041F, 0.7230F, 0.5719F, 0.9282F},
{0.2862F, 0.2329F, 0.7302F, 0.6717F, 0.1983F, 0.1876F, 0.4561F, 0.2126F},
{0.7849F, 0.0239F, 0.7977F, 0.5935F, 0.9958F, 0.4703F, 0.4612F, 0.1627F},
{0.6393F, 0.3544F, 0.8643F, 0.5039F, 0.8087F, 0.6521F, 0.5086F, 0.9331F},
{0.7749F, 0.9798F, 0.6820F, 0.7869F, 0.5144F, 0.2941F, 0.8137F, 0.4561F},
{0.6505F, 0.3974F, 0.6909F, 0.7019F, 0.2729F, 0.4240F, 0.0162F, 0.1536F},
{0.3529F, 0.8821F, 0.1812F, 0.3426F, 0.3472F, 0.0300F, 0.8841F, 0.8088F}},
{{0.5099F, 0.3323F, 0.1488F, 0.3424F, 0.1494F, 0.6225F, 0.8103F, 0.5995F},
{0.9198F, 0.5635F, 0.8908F, 0.9378F, 0.6689F, 0.3176F, 0.3755F, 0.3883F},
{0.0626F, 0.5309F, 0.0307F, 0.3955F, 0.2794F, 0.1420F, 0.4758F, 0.7558F},
{0.6154F, 0.5280F, 0.2318F, 0.3832F, 0.4435F, 0.3490F, 0.4043F, 0.5872F},
{0.3705F, 0.3848F, 0.2182F, 0.8332F, 0.4559F, 0.5310F, 0.4611F, 0.4236F},
{0.6141F, 0.8103F, 0.2260F, 0.9907F, 0.5615F, 0.4520F, 0.6949F, 0.0175F},
{0.3969F, 0.5021F, 0.0970F, 0.9937F, 0.9270F, 0.4302F, 0.2868F, 0.3891F},
{0.8693F, 0.5170F, 0.5348F, 0.2676F, 0.9769F, 0.3356F, 0.9427F, 0.3908F}}
},
{
{{0.4803F, 0.5223F, 0.6395F, 0.8402F, 0.4442F, 0.6377F, 0.7852F, 0.9063F},
{0.0361F, 0.0470F, 0.3104F, 0.6921F, 0.0543F, 0.4490F, 0.9541F, 0.7395F},
{0.3832F, 0.3828F, 0.2236F, 0.2068F, 0.4369F, 0.7443F, 0.6952F, 0.6394F},
{0.5309F, 0.8483F, 0.1991F, 0.9756F, 0.8969F, 0.7284F, 0.4657F, 0.5486F},
{0.8839F, 0.3260F, 0.6892F, 0.4074F, 0.9473F, 0.5526F, 0.4147F, 0.4786F},
{0.9674F, 0.0952F, 0.8379F, 0.2163F, 0.9420F, 0.4046F, 0.1339F, 0.5234F},
{0.4213F, 0.8392F, 0.3184F, 0.4576F, 0.9349F, 0.8267F, 0.0931F, 0.8009F},
{0.5570F, 0.5871F, 0.4175F, 0.5465F, 0.6679F, 0.9224F, 0.0049F, 0.9421F}},
{{0.3739F, 0.6230F, 0.7613F, 0.1337F, 0.8527F, 0.0557F, 0.6424F, 0.8463F},
{0.7179F, 0.5638F, 0.2457F, 0.4579F, 0.0487F, 0.8693F, 0.8216F, 0.0415F},
{0.1724F, 0.5108F, 0.9103F, 0.0850F, 0.0080F, 0.8927F, 0.7706F, 0.3600F},
{0.7751F, 0.8828F, 0.7872F, 0.4541F, 0.3181F, 0.1855F, 0.2486F, 0.0033F},
{0.5558F, 0.3500F, 0.6034F, 0.1763F, 0.7418F, 0.5190F, 0.5147F, 0.4090F},
{0.4476F, 0.1249F, 0.8116F, 0.9091F, 0.1738F, 0.6150F, 0.3285F, 0.3133F},
{0.5657F, 0.4447F, 0.5049F, 0.3425F, 0.7443F, 0.2718F, 0.2466F, 0.5586F},
{0.3684F, 0.7616F, 0.5165F, 0.9621F, 0.2864F, 0.7747F, 0.8110F, 0.7045F}},
{{0.4570F, 0.4577F, 0.0373F, 0.6084F, 0.4632F, 0.3472F, 0.9917F, 0.2011F},
{0.7921F, 0.2202F, 0.9525F, 0.7274F, 0.3357F, 0.0076F, 0.5786F, 0.3034F},
{0.6510F, 0.0798F, 0.2757F, 0.1738F, 0.3046F, 0.2197F, 0.3872F, 0.5650F},
{0.1532F, 0.3204F, 0.6094F, 0.3287F, 0.8903F, 0.9773F, 0.7950F, 0.2845F},
{0.2482F, 0.3395F, 0.8795F, 0.4325F, 0.1395F, 0.2457F, 0.2968F, 0.5424F},
{0.8636F, 0.7426F, 0.2151F, 0.6900F, 0.3938F, 0.0062F, 0.4980F, 0.4098F},
{0.8026F, 0.0464F, 0.2662F, 0.7835F, 0.8444F, 0.0688F, 0.8796F, 0.7625F},
{0.2764F, 0.5341F, 0.1773F, 0.6671F, 0.7555F, 0.5235F, 0.7142F, 0.9423F}}}}
});
// One bias value per output channel (4).
std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<float,4> {{ 0.1902F, -0.1789F, -0.0314F, -0.0589F}});
// Kernel weights: 4 output channels x 3 input channels x 3x3 taps.
std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<float,4,3,3,3> { //NCHW
{
{
{{ 0.0039F, 0.1098F, -0.0834F},
{-0.0890F, 0.0725F, -0.1178F},
{ 0.1056F, -0.0924F, -0.0574F}},
{{ 0.0070F, -0.0730F, -0.0674F},
{-0.0380F, -0.1025F, -0.0085F},
{-0.1451F, -0.0656F, 0.1137F}},
{{ 0.1020F, 0.1025F, -0.0678F},
{ 0.0028F, 0.1512F, -0.0871F},
{ 0.1563F, -0.1446F, -0.1636F}}
},
{
{{ 0.1472F, 0.0025F, -0.0281F},
{ 0.0350F, 0.0296F, -0.1711F},
{-0.1197F, -0.1198F, -0.1130F}},
{{-0.1492F, 0.1554F, -0.1044F},
{ 0.1203F, -0.1596F, 0.0589F},
{-0.0436F, -0.1876F, -0.0816F}},
{{ 0.1572F, -0.0982F, 0.1293F},
{ 0.1358F, 0.1559F, 0.1322F},
{ 0.0296F, -0.0354F, -0.0632F}}
},
{
{{-0.0941F, -0.0479F, 0.0908F},
{-0.1319F, -0.1333F, 0.1223F},
{-0.1098F, 0.1924F, 0.1075F}},
{{ 0.1796F, 0.0213F, 0.0626F},
{ 0.0275F, 0.1883F, -0.0818F},
{ 0.0363F, 0.0684F, 0.1094F}},
{{ 0.1131F, 0.1258F, -0.0558F},
{ 0.1498F, 0.0322F, -0.0186F},
{-0.1801F, -0.0358F, 0.1727F}}
},
{
{{-0.1500F, -0.0554F, -0.0994F},
{-0.0818F, -0.1223F, 0.1365F},
{ 0.1281F, 0.1507F, -0.0890F}},
{{-0.0444F, -0.1071F, -0.1632F},
{ 0.0757F, -0.1235F, 0.0408F},
{ 0.0401F, -0.1914F, 0.1772F}},
{{-0.0714F, 0.1582F, -0.0065F},
{-0.0119F, 0.1375F, -0.0727F},
{-0.1532F, -0.1826F, -0.0417F}}
}
}
});
// Expected reference output: batch=2, 4 output channels, 2x2 spatial positions
// (values rounded to 4 decimals, hence the approximate comparison below).
std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<float,2,4,2,2> {
{
{
{{-0.2174F, -0.0778F},
{-0.2584F, 0.2303F}},
{{-0.7686F, -0.3879F},
{-0.1775F, 0.0119F}},
{{ 0.5180F, 0.5087F},
{ 0.5398F, 0.3476F}},
{{-0.5258F, -0.3128F},
{-0.6673F, -0.1827F}}
},
{
{{-0.1902F, -0.0467F},
{-0.3327F, -0.1701F}},
{{-0.5505F, -0.4875F},
{-0.4119F, -0.5726F}},
{{ 0.5777F, 0.4428F},
{ 0.6121F, 0.7221F}},
{{-0.6009F, -0.6335F},
{-0.5159F, -0.3353F}}
}
}
});
// Wire inputs in the operator's slot order used throughout this file:
// 0 = data, 1 = weights, 2 = bias.
op->associateInput(0,myInput);
op->associateInput(1,myWeights);
op->associateInput(2,myBias);
op->setDataType(DataType::Float32);
op->setBackend("cpu");
op->forwardDims(); // propagate input dims to the output tensor before running
myConv->forward();
op->getOutput(0)->print(); // debug dump of the computed output
// NOTE(review): assumes approxEq's trailing arguments are (relative, absolute)
// tolerances — confirm against TensorUtils.hpp.
REQUIRE(approxEq<float>(*(op->getOutput(0)),*myOutput, 1e-3f, 1e-4f));
}
}
\ No newline at end of file
......@@ -124,7 +124,9 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
dims_in[1]; // averaging per channel : 1 addition per element in
// the channel + 1 division this for every batch
// create out nb_elems
std::vector<std::size_t> dims_out{dims_in[0], dims_in[1]};
std::vector<std::size_t> dims_out(dims_in.size(), 1);
dims_out[0] = dims_in[0];
dims_out[1] = dims_in[1];
const std::size_t out_nb_elems =
std::accumulate(dims_out.cbegin(), dims_out.cend(), std::size_t(1),
std::multiplies<std::size_t>());
......@@ -192,7 +194,9 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
// the channel + 1 division this for every batch
// create out nb_elems
std::vector<std::size_t> dims_out{dims_in[0], dims_in[1]};
std::vector<std::size_t> dims_out(dims_in.size(), 1);
dims_out[0] = dims_in[0];
dims_out[1] = dims_in[1];
const std::size_t out_nb_elems =
std::accumulate(dims_out.cbegin(), dims_out.cend(),
std::size_t(1), std::multiplies<std::size_t>());
......@@ -253,7 +257,9 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
SECTION("2D_img") {
const std::vector<DimSize_t> in_dims{batch_size, channels, height,
width};
const std::vector<DimSize_t> out_dims{batch_size, channels};
std::vector<std::size_t> out_dims(in_dims.size(), 1);
out_dims[0] = in_dims[0];
out_dims[1] = in_dims[1];
DimSize_t in_nb_elems = batch_size * channels * height * width;
DimSize_t out_nb_elems = batch_size * channels;
number_of_operation +=
......@@ -368,7 +374,9 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
SECTION("3D_img") {
const std::vector<DimSize_t> in_dims{batch_size, channels, height,
width, depth};
const std::vector<DimSize_t> out_dims{batch_size, channels};
std::vector<std::size_t> out_dims(in_dims.size(), 1);
out_dims[0] = in_dims[0];
out_dims[1] = in_dims[1];
DimSize_t in_nb_elems =
batch_size * channels * height * width * depth;
number_of_operation +=
......
......@@ -134,7 +134,7 @@ TEST_CASE("[cpu/operator] Pad(forward)", "[Pad][CPU]") {
SECTION("Asymmetric Pad") {
const int pv = 0; // pad value
std::shared_ptr<Node> myPad = Pad<2>({0, 1, 1, 0}, "mypad", PadBorderType::Constant, static_cast<double>(pv));
std::shared_ptr<Node> myPad = Pad<2>({1, 0, 0, 1}, "mypad", PadBorderType::Constant, static_cast<double>(pv));
auto op = std::static_pointer_cast<OperatorTensor>(myPad -> getOperator());
std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
{
......
This diff is collapsed.
This diff is collapsed.
......@@ -22,12 +22,12 @@
using namespace Aidge;
TEST_CASE("[ConstantFolding] test") {
TEST_CASE("[ConstantFolding] forward", "[ConstantFolding][forward][CPU]") {
// generate the original GraphView
auto matmul0 = MatMul("matmul0");
auto add0 = Add(2, "add0");
auto add0 = Add("add0");
auto matmul1 = MatMul("matmul1");
auto add1 = Add(2, "add1");
auto add1 = Add("add1");
auto b0 = Producer(std::make_shared<Tensor>(Array1D<float,5>{{1, 2, 3, 4, 5}}), "B0", true);
auto w0 = Producer(std::make_shared<Tensor>(Array2D<float,5,5>{{{1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}, {1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}, {1, 2, 3, 4, 5}}}), "W0", true);
......
This diff is collapsed.
......@@ -147,10 +147,13 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
std::shared_ptr<GraphView> g =
Sequential({Conv(1, 3, {3, 3}, "inputConv"),
Parallel({
Conv(3, 3, {1, 1}, "conv1.1"),
Conv(3, 3, {1, 1}, "conv1.2"),
Sequential({
Parallel({
Conv(3, 3, {1, 1}, "conv1.1"),
Conv(3, 3, {1, 1}, "conv1.2")}),
Add("add1")}),
Conv(3, 3, {1, 1}, "conv1.3")}),
Add(3, "add1"),
Add("add2"),
Conv(3, 2, {1, 1}, "conv2"),
FC(18, 5, false, "out")});
......@@ -216,9 +219,9 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
std::shared_ptr<Tensor> biasTensor = std::make_shared<Tensor>(
Array2D<int, 2, 3>{{{2, 0, 0}, {1, 0, 0}}});
auto add1 = Add(2, "add1");
auto add1 = Add("add1");
auto mem = Memorize(3, "mem1");
auto add2 = Add(2, "add2");
auto add2 = Add("add2");
auto bias = Producer(biasTensor, "bias");
auto init = Producer(initTensor, "init");
auto input = Producer(in, "input");
......@@ -260,9 +263,9 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
std::shared_ptr<Tensor> biasTensor = std::make_shared<Tensor>(
Array2D<int, 2, 3>{{{2, 0, 0}, {1, 0, 0}}});
auto add1 = Add(2, "add1");
auto add1 = Add("add1");
auto mem = Memorize(3, "mem1");
auto add2 = Add(2, "add2");
auto add2 = Add("add2");
auto bias = Producer(biasTensor, "bias");
auto init = Producer(initTensor, "init");
auto input = Producer(in, "input");
......
0.3.2
0.4.0
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment