Commit fd4dd69d authored by Jerome Hue

Add a new test for a full NN using a leaky neuron

parent 37bb4cb9
Pipeline #61380 failed
@@ -29,9 +29,6 @@ void Aidge::SubImpl_cpu::forward() {
     // Find the correct kernel type
     const auto impl = Registrar<SubImpl_cpu>::create(getBestMatch(getRequiredSpec()));
-    Log::info("Sub Operator Kernel");
-    op_.getInput(0)->print();
-    op_.getInput(1)->print();
     // Call kernel
     impl.forward(op_.getInput(0)->dims(),
...
@@ -10,16 +10,18 @@
 ********************************************************************************/

 #include <aidge/filler/Filler.hpp>
+#include <aidge/operator/FC.hpp>
 #include <catch2/catch_test_macros.hpp>
 #include <cmath>
 #include <cstdlib>
 #include <memory>
 #include <random>

 #include "aidge/backend/cpu/operator/ConvImpl.hpp"
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Conv.hpp"
+#include "aidge/operator/FC.hpp"
 #include "aidge/operator/Identity.hpp"
 #include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/MetaOperatorDefs.hpp"
@@ -29,6 +31,7 @@
 #include "aidge/scheduler/ParallelScheduler.hpp"
 #include "aidge/scheduler/SequentialScheduler.hpp"
 #include "aidge/utils/TensorUtils.hpp"
+#include "aidge/filler/Filler.hpp"

 using namespace Aidge;
@@ -211,6 +214,7 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
         PaddedConv(3, 4, {3, 3}, "myPaddedConv", {1, 1}, {1, 1, 1, 1});
     }
+
     SECTION("LSTM(forward)") {
         auto pop = Pop();
         auto myLSTM = LSTM(32, 64, 0, true, "ltsm");
         auto op =
@@ -279,6 +283,7 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
         REQUIRE(microGraphScheduler->getStaticScheduling(1).size() == 24);
         REQUIRE(microGraphScheduler->getStaticScheduling(15).size() == 24);
     }
+
     SECTION("LSTM(forward_values)") {
         auto myLSTM = LSTM(2, 3, 0, true, "ltsm");
         auto op =
@@ -348,6 +353,7 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
         REQUIRE(approxEq<float>(*(op->getOutput(0)), *myHiddenState));
     }
+
     SECTION("LSTM(forward_values_seq)") {
         auto pop = Pop();
         auto myLSTM = LSTM(2, 3, 2, true, "ltsm");
@@ -413,6 +419,7 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
         REQUIRE(approxEq<float>(*(op->getOutput(0)), *myHiddenState));
     }
+
     SECTION("LSTM(forward_values_seq_flatten)(sequential)") {
         auto pop = Pop();
         auto myLSTM = LSTM(2, 3, 2, true, "ltsm");
@@ -592,18 +599,103 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
     }

     SECTION("Leaky(forward)(fixed)") {
-        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(
-            Array3D<float, 2, 3, 2>{{{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}},
-                                     {{2.0, 3.0}, {4.0, 5.0}, {6.0, 7.0}}}});
-        constexpr auto beta = 0.9;
+        constexpr auto inChannels = 10;
+        constexpr auto outChannels = 5;
+        constexpr auto beta = 0.95;
         constexpr auto threshold = 1.0;
-        auto pop = Pop("pop");
-        auto leaky = Leaky(2, beta, threshold, "leaky");
+        constexpr auto nbTimeSteps = 2;
+
+        auto myWeights = std::make_shared<Tensor>(Array2D<float, outChannels, inChannels>{{
+            {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0},
+            {1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1},
+            {0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 0.1, 0.2, 0.3, 0.4},
+            {0.4, 0.3, 0.2, 0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5},
+            {0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0},
+        }});
+
+        auto myWeights2 = std::make_shared<Tensor>(Array2D<float, inChannels, outChannels>{{
+            {0.1, 0.2, 0.3, 0.4, 0.5},
+            {0.6, 0.7, 0.8, 0.9, 1.0},
+            {1.0, 0.9, 0.8, 0.7, 0.6},
+            {0.5, 0.4, 0.3, 0.2, 0.1},
+            {0.5, 0.6, 0.7, 0.8, 0.9},
+            {1.0, 0.1, 0.2, 0.3, 0.4},
+            {0.4, 0.3, 0.2, 0.1, 0.0},
+            {0.1, 0.2, 0.3, 0.4, 0.5},
+            {0.9, 0.8, 0.7, 0.6, 0.5},
+            {0.4, 0.3, 0.2, 0.1, 0.0},
+        }});
+
+        auto myInput = std::make_shared<Tensor>(Array2D<float, 2, 10>{{
+            {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0},
+            {1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1},
+        }});
+
+        // Reference results computed with PyTorch/snnTorch:
+        // output of fc1 at time step 1.
+        auto expectedOutputlif1ts1 = std::make_shared<Tensor>(Array2D<float, 2, 5>{{
+            {3.850, 2.2000, 2.6500, 1.5000, 1.6500},
+            {2.200, 3.8500, 3.4000, 1.2500, 3.3000},
+        }});
+
+        auto expectedOutputfc2ts1 = std::make_shared<Tensor>(Array2D<float, 2, 10>{{
+            {1.5000, 4.0000, 4.0000, 1.5000, 3.5000, 2.0000, 1.0000, 1.5000, 3.5000, 1.0000},
+            {1.5000, 4.0000, 4.0000, 1.5000, 3.5000, 2.0000, 1.0000, 1.5000, 3.5000, 1.0000},
+        }});
+
+        auto expectedOutputlif1ts2 = std::make_shared<Tensor>(Array2D<float, 2, 5>{{
+            {6.5075, 3.2900, 4.1675, 1.9250, 2.2175},
+            {3.2900, 6.5075, 5.6300, 1.4375, 5.4350},
+        }});
+
+        // NOTE: Same output as at the previous time step: the membrane potential
+        // of every channel is above the threshold, so the LIF neuron fires at
+        // every time step for every channel.
+        auto expectedOutputfc2ts2 = std::make_shared<Tensor>(Array2D<float, 2, 10>{{
+            {1.5000, 4.0000, 4.0000, 1.5000, 3.5000, 2.0000, 1.0000, 1.5000, 3.5000, 1.0000},
+            {1.5000, 4.0000, 4.0000, 1.5000, 3.5000, 2.0000, 1.0000, 1.5000, 3.5000, 1.0000},
+        }});
+
+        auto init = std::make_shared<Tensor>(Array2D<float, 2, 5>{});
+        uniformFiller<float>(init, 0.0, 0.0);
+
+        auto fc1 = FC(inChannels, outChannels, true, "myfc");
+        auto fc2 = FC(outChannels, inChannels, true, "fc2");
+        // NOTE: Account for the init step by adding 1 to the time-step parameter.
+        auto lif1 = Leaky(nbTimeSteps + 1, beta, threshold, "leaky");
+
+        // associateInput() does not work for the weights here, so set them
+        // directly on the weight producers instead.
+        fc1->input(1).first->getOperator()->setOutput(0, myWeights);
+        fc2->input(1).first->getOperator()->setOutput(0, myWeights2);
+
+        auto fc1Op = std::static_pointer_cast<OperatorTensor>(fc1->getOperator());
+        auto lif1Op = std::static_pointer_cast<MetaOperator_Op>(lif1->getOperator());
+        auto fc2Op = std::static_pointer_cast<OperatorTensor>(fc2->getOperator());
+
+        fc1Op->associateInput(0, myInput);
+        lif1Op->associateInput(1, init);
+        lif1Op->associateInput(2, init);
+
+        fc1->addChild(lif1, 0, 0);
+        lif1->addChild(fc2, 1, 0);
+
+        auto g = std::make_shared<GraphView>();
+        g->add({fc1, lif1, fc2});
+        g->compile("cpu", DataType::Float32);
+        auto scheduler = SequentialScheduler(g);
+
+        // Forward #1 (simulates time step 0).
+        scheduler.forward(true);
+        REQUIRE(approxEq<float>(*(lif1Op->getOutput(0)), *(expectedOutputlif1ts1)));
+        REQUIRE(approxEq<float>(*(fc2Op->getOutput(0)), *(expectedOutputfc2ts1)));
+
+        // Forward #2 (simulates time step 1).
+        scheduler.forward(true);
+        REQUIRE(approxEq<float>(*(lif1Op->getOutput(0)), *(expectedOutputlif1ts2)));
+        REQUIRE(approxEq<float>(*(fc2Op->getOutput(0)), *(expectedOutputfc2ts2)));
     }

     SECTION("Leaky(forward)") {
...
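The expected tensors in the new test can be cross-checked by hand. The values are consistent with an snnTorch-style leaky integrate-and-fire update with reset by subtraction, U[t] = beta * U[t-1] + I[t] - S[t-1] * threshold, where the spike is S[t] = (U[t] > threshold). The standalone sketch below (plain C++, no Aidge dependency; an illustration, not part of the commit) reproduces channel 0 of batch row 0: fc1 yields I = dot(x, w0) = 3.85, so U = 3.85 at the first step and U = 0.95 * 3.85 + 3.85 - 1.0 = 6.5075 at the second, matching expectedOutputlif1ts1 and expectedOutputlif1ts2.

// Standalone cross-check of the expected Leaky values (illustrative only).
#include <array>
#include <cstdio>

int main() {
    constexpr float beta = 0.95f;
    constexpr float threshold = 1.0f;

    // Row 0 of myInput and row 0 of myWeights from the test above.
    const std::array<float, 10> x  = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f,
                                      0.6f, 0.7f, 0.8f, 0.9f, 1.0f};
    const std::array<float, 10> w0 = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f,
                                      0.6f, 0.7f, 0.8f, 0.9f, 1.0f};

    // fc1 output for channel 0: dot(x, w0) = 3.85.
    float current = 0.0f;
    for (int k = 0; k < 10; ++k) { current += x[k] * w0[k]; }

    // Time step 0: the membrane starts at 0, so U = I; the neuron spikes (3.85 > 1).
    float u = current;
    float s = (u > threshold) ? 1.0f : 0.0f;
    std::printf("t0: U = %.4f, spike = %.0f\n", u, s); // U = 3.8500

    // Time step 1: decay the potential, integrate the same input,
    // and subtract the reset for the previous spike.
    u = beta * u + current - s * threshold;
    std::printf("t1: U = %.4f\n", u); // U = 6.5075
    return 0;
}

Because every lif1 potential stays above the threshold at both steps, the spike tensor fed to fc2 is all ones, and each fc2 output channel reduces to the row sum of myWeights2 (e.g. 0.1 + 0.2 + 0.3 + 0.4 + 0.5 = 1.5), which is why expectedOutputfc2ts1 and expectedOutputfc2ts2 are identical.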