Skip to content
Snippets Groups Projects
Commit 948d024b authored by Jerome Hue, committed by Olivier BICHLER
Browse files

Format files

parent 623c55c6
No related branches found
No related tags found
1 merge request: !131 — CHORE: Add tests for Leaky MetaOperator
......@@ -36,7 +36,6 @@
#include "aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp"
#include "aidge/backend/cpu/operator/HeavisideImpl.hpp"
#include "aidge/backend/cpu/operator/LRNImpl.hpp"
#include "aidge/backend/cpu/operator/HeavisideImpl.hpp"
#include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
#include "aidge/backend/cpu/operator/LnImpl.hpp"
#include "aidge/backend/cpu/operator/MatMulImpl.hpp"
......
......@@ -17,9 +17,10 @@
#include <memory>
#include <random>
#include "aidge/backend/cpu/operator/ConvImpl.hpp"
#include "aidge/backend/cpu/operator/ConvImpl.hpp"
#include "aidge/backend/cpu/operator/PadImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/filler/Filler.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/FC.hpp"
#include "aidge/operator/Identity.hpp"
......@@ -31,7 +32,6 @@
#include "aidge/scheduler/ParallelScheduler.hpp"
#include "aidge/scheduler/SequentialScheduler.hpp"
#include "aidge/utils/TensorUtils.hpp"
#include "aidge/filler/Filler.hpp"
using namespace Aidge;
......@@ -599,80 +599,125 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
}
SECTION("Leaky(forward)(fixed)") {
constexpr auto inChannels = 10;
constexpr auto outChannels = 5;
constexpr auto beta = 0.95;
constexpr auto threshold = 1.0;
constexpr auto threshold = 1.0;
constexpr auto nbTimeSteps = 2;
auto myWeights = std::make_shared<Tensor>(Array2D<float, outChannels, inChannels>{{
{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0},
{1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1},
{0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 0.1, 0.2, 0.3, 0.4},
{0.4, 0.3, 0.2, 0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5},
{0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0},
}});
auto myWeights2 = std::make_shared<Tensor>(Array2D<float, inChannels, outChannels>{{
{0.1, 0.2, 0.3, 0.4, 0.5},
{0.6, 0.7, 0.8, 0.9, 1.0},
{1.0, 0.9, 0.8, 0.7, 0.6},
{0.5, 0.4, 0.3, 0.2, 0.1},
{0.5, 0.6, 0.7, 0.8, 0.9},
{1.0, 0.1, 0.2, 0.3, 0.4},
{0.4, 0.3, 0.2, 0.1, 0.0},
{0.1, 0.2, 0.3, 0.4, 0.5},
{0.9, 0.8, 0.7, 0.6, 0.5},
{0.4, 0.3, 0.2, 0.1, 0.0},
}});
auto myInput = std::make_shared<Tensor>(Array2D<float, 2, 10>{{
auto myWeights =
std::make_shared<Tensor>(Array2D<float, outChannels, inChannels>{{
{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0},
{1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1},
{0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 0.1, 0.2, 0.3, 0.4},
{0.4, 0.3, 0.2, 0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5},
{0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0},
}});
auto myWeights2 =
std::make_shared<Tensor>(Array2D<float, inChannels, outChannels>{{
{0.1, 0.2, 0.3, 0.4, 0.5},
{0.6, 0.7, 0.8, 0.9, 1.0},
{1.0, 0.9, 0.8, 0.7, 0.6},
{0.5, 0.4, 0.3, 0.2, 0.1},
{0.5, 0.6, 0.7, 0.8, 0.9},
{1.0, 0.1, 0.2, 0.3, 0.4},
{0.4, 0.3, 0.2, 0.1, 0.0},
{0.1, 0.2, 0.3, 0.4, 0.5},
{0.9, 0.8, 0.7, 0.6, 0.5},
{0.4, 0.3, 0.2, 0.1, 0.0},
}});
auto myInput = std::make_shared<Tensor>(Array2D<float, 2, 10>{{
{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0},
{1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1},
}});
// py/snn Torch computed result, output of fc1 at time step 1
auto expectedOutputlif1ts1 = std::make_shared<Tensor>(Array2D<float,2,5>{{
{3.850, 2.2000, 2.6500, 1.5000, 1.6500},
{2.200, 3.8500, 3.4000, 1.2500, 3.3000},
}});
auto expectedOutputfc2ts1 = std::make_shared<Tensor>(Array2D<float,2,10>{{
{1.5000, 4.0000, 4.0000, 1.5000, 3.5000, 2.0000, 1.0000, 1.5000, 3.5000, 1.0000},
{1.5000, 4.0000, 4.0000, 1.5000, 3.5000, 2.0000, 1.0000, 1.5000, 3.5000, 1.0000},
}});
auto expectedOutputlif1ts2 = std::make_shared<Tensor>(Array2D<float,2,5>{{
{6.5075, 3.2900, 4.1675, 1.9250, 2.2175},
{3.2900, 6.5075, 5.6300, 1.4375, 5.4350},
}});
// NOTE: Same output as before, because for all channels, we have a potential higher than threshold.
// Thus the lif neuron fires at every timestep for every channel.
auto expectedOutputfc2ts2 = std::make_shared<Tensor>(Array2D<float,2,10>{{
{1.5000, 4.0000, 4.0000, 1.5000, 3.5000, 2.0000, 1.0000, 1.5000, 3.5000, 1.0000},
{1.5000, 4.0000, 4.0000, 1.5000, 3.5000, 2.0000, 1.0000, 1.5000, 3.5000, 1.0000},
}});
auto expectedOutputlif1ts1 =
std::make_shared<Tensor>(Array2D<float, 2, 5>{{
{3.850, 2.2000, 2.6500, 1.5000, 1.6500},
{2.200, 3.8500, 3.4000, 1.2500, 3.3000},
}});
auto expectedOutputfc2ts1 =
std::make_shared<Tensor>(Array2D<float, 2, 10>{{
{1.5000,
4.0000,
4.0000,
1.5000,
3.5000,
2.0000,
1.0000,
1.5000,
3.5000,
1.0000},
{1.5000,
4.0000,
4.0000,
1.5000,
3.5000,
2.0000,
1.0000,
1.5000,
3.5000,
1.0000},
}});
auto expectedOutputlif1ts2 =
std::make_shared<Tensor>(Array2D<float, 2, 5>{{
{6.5075, 3.2900, 4.1675, 1.9250, 2.2175},
{3.2900, 6.5075, 5.6300, 1.4375, 5.4350},
}});
// NOTE: Same output as before, because for all channels, we have a
// potential higher than threshold. Thus the lif neuron fires at every
// timestep for every channel.
auto expectedOutputfc2ts2 =
std::make_shared<Tensor>(Array2D<float, 2, 10>{{
{1.5000,
4.0000,
4.0000,
1.5000,
3.5000,
2.0000,
1.0000,
1.5000,
3.5000,
1.0000},
{1.5000,
4.0000,
4.0000,
1.5000,
3.5000,
2.0000,
1.0000,
1.5000,
3.5000,
1.0000},
}});
auto init = std::make_shared<Tensor>(Array2D<float, 2, 5>{});
uniformFiller<float>(init, 0.0, 0.0);
auto fc1 = FC(inChannels, outChannels, true, "myfc");
auto fc2 = FC(outChannels, inChannels, true, "fc2");
// NOTE: Account for init step by adding 1 to the max timestep parameter.
auto lif1 = Leaky(nbTimeSteps+1, beta, threshold, "leaky");
// NOTE: Account for init step by adding 1 to the max timestep
// parameter.
auto lif1 = Leaky(nbTimeSteps + 1, beta, threshold, "leaky");
// associateInput() does not work
fc1->input(1).first->getOperator()->setOutput(0, myWeights);
fc2->input(1).first->getOperator()->setOutput(0, myWeights2);
auto fc1Op = std::static_pointer_cast<OperatorTensor>(fc1->getOperator());
auto lif1Op = std::static_pointer_cast<MetaOperator_Op>(lif1->getOperator());
auto fc2Op = std::static_pointer_cast<OperatorTensor>(fc2->getOperator());
auto fc1Op =
std::static_pointer_cast<OperatorTensor>(fc1->getOperator());
auto lif1Op =
std::static_pointer_cast<MetaOperator_Op>(lif1->getOperator());
auto fc2Op =
std::static_pointer_cast<OperatorTensor>(fc2->getOperator());
fc1Op->associateInput(0, myInput);
lif1Op->associateInput(1, init);
......@@ -681,7 +726,6 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
fc1->addChild(lif1, 0, 0);
lif1->addChild(fc2, 1, 0);
auto g = std::make_shared<GraphView>();
g->add({fc1, lif1, fc2});
g->compile("cpu", DataType::Float32);
......@@ -689,13 +733,17 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
// Forward 1 (simulate timestep 0)
scheduler.forward(true);
REQUIRE(approxEq<float>(*(lif1Op->getOutput(0)), *(expectedOutputlif1ts1)));
REQUIRE(approxEq<float>(*(fc2Op->getOutput(0)), *(expectedOutputfc2ts1)));
REQUIRE(approxEq<float>(*(lif1Op->getOutput(0)),
*(expectedOutputlif1ts1)));
REQUIRE(
approxEq<float>(*(fc2Op->getOutput(0)), *(expectedOutputfc2ts1)));
// Forward 1 (simulate timestep 1)
scheduler.forward(true);
REQUIRE(approxEq<float>(*(lif1Op->getOutput(0)), *(expectedOutputlif1ts2)));
REQUIRE(approxEq<float>(*(fc2Op->getOutput(0)), *(expectedOutputfc2ts2)));
REQUIRE(approxEq<float>(*(lif1Op->getOutput(0)),
*(expectedOutputlif1ts2)));
REQUIRE(
approxEq<float>(*(fc2Op->getOutput(0)), *(expectedOutputfc2ts2)));
}
SECTION("Leaky(forward)") {
......
Loading… or try again.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment