Commit b4760e97 authored by Jerome Hue

WIP - Leaky backward

parent b724c161
Pipeline #68716 failed
@@ -136,7 +136,6 @@ void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
B* biasesGrad = static_cast<B*>(biasesGrad_);
Log::info("\t\t Call to FCImpl_cpu_backward_kernel - Doing Bias grad");
// bias grad
if (biasesGrad == nullptr) { // no bias
// Do nothing
@@ -151,7 +150,6 @@ void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
}
}
Log::info("\t\t Call to FCImpl_cpu_backward_kernel - Doing Weight grad");
// weight grad
for (std::size_t o = 0; o < outputFeatureSize; ++o) {
for (std::size_t c = 0; c < inputFeatureSize; ++c) {
@@ -163,7 +161,6 @@ void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
}
}
Log::info("\t\t Call to FCImpl_cpu_backward_kernel - Doing Input grad");
// input grad
for (std::size_t b = 0; b < batchSize; ++b) {
for (std::size_t c = 0; c < inputFeatureSize; ++c) {
......
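For reference, the three loops these hunks touch implement the standard fully-connected backward pass: the bias gradient sums the output gradient over the batch, the weight gradient accumulates output gradient times input, and the input gradient accumulates output gradient times weight. Below is a minimal standalone sketch with plain float buffers and a hypothetical function name, not the actual Aidge kernel:

#include <cstddef>

// Hypothetical sketch of what FCImpl_cpu_backward_kernel computes, with plain
// float buffers instead of Aidge's templated I/O/W/B types.
void fc_backward_sketch(std::size_t batchSize,
                        std::size_t inputFeatureSize,
                        std::size_t outputFeatureSize,
                        const float* input,      // [batchSize, inputFeatureSize]
                        const float* weight,     // [outputFeatureSize, inputFeatureSize]
                        const float* outputGrad, // [batchSize, outputFeatureSize]
                        float* inputGrad,        // [batchSize, inputFeatureSize]
                        float* weightGrad,       // [outputFeatureSize, inputFeatureSize]
                        float* biasGrad)         // [outputFeatureSize], may be nullptr
{
    // bias grad: db[o] = sum over batch of dY[b, o]
    if (biasGrad != nullptr) {
        for (std::size_t o = 0; o < outputFeatureSize; ++o) {
            float sum = 0.0f;
            for (std::size_t b = 0; b < batchSize; ++b) {
                sum += outputGrad[b * outputFeatureSize + o];
            }
            biasGrad[o] = sum;
        }
    }
    // weight grad: dW[o, c] = sum over batch of dY[b, o] * X[b, c]
    for (std::size_t o = 0; o < outputFeatureSize; ++o) {
        for (std::size_t c = 0; c < inputFeatureSize; ++c) {
            float sum = 0.0f;
            for (std::size_t b = 0; b < batchSize; ++b) {
                sum += outputGrad[b * outputFeatureSize + o] * input[b * inputFeatureSize + c];
            }
            weightGrad[o * inputFeatureSize + c] = sum;
        }
    }
    // input grad: dX[b, c] = sum over outputs of dY[b, o] * W[o, c]
    for (std::size_t b = 0; b < batchSize; ++b) {
        for (std::size_t c = 0; c < inputFeatureSize; ++c) {
            float sum = 0.0f;
            for (std::size_t o = 0; o < outputFeatureSize; ++o) {
                sum += outputGrad[b * outputFeatureSize + o] * weight[o * inputFeatureSize + c];
            }
            inputGrad[b * inputFeatureSize + c] = sum;
        }
    }
}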
@@ -194,6 +194,7 @@ void MulImpl_cpu_backward_kernel(const std::size_t input0Length,
auto idx0 = getFlattenedIndex(broadcastedDims0, idxInput0);
auto idx1 = getFlattenedIndex(broadcastedDims1, idxInput1);
Log::info("Multiplication by {}", input1[idx1]);
grad_input_0[idx0] += static_cast<I1>(grad_output[i] * input1[idx1]);
grad_input_1[idx1] += static_cast<I2>(grad_output[i] * input0[idx0]);
}
......
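The added log line prints the factor used by the product rule: for an elementwise multiply, each operand's gradient is the output gradient scaled by the other operand, accumulated back onto the (possibly broadcast) source element. A small sketch, assuming the broadcast index maps are precomputed by a helper standing in for getFlattenedIndex():

#include <cstddef>
#include <vector>

// Hypothetical sketch of the product rule with broadcasting, in the spirit of
// MulImpl_cpu_backward_kernel. idx0/idx1 map each output element back to its
// source element in the (possibly smaller) broadcast operands.
void mul_backward_sketch(std::size_t gradLength,
                         const float* input0, const float* input1,
                         const float* gradOutput,
                         float* gradInput0, float* gradInput1,
                         const std::vector<std::size_t>& idx0,
                         const std::vector<std::size_t>& idx1)
{
    for (std::size_t i = 0; i < gradLength; ++i) {
        // Each operand's gradient is the output gradient times the *other*
        // operand, accumulated (+=) because several output elements may map
        // to the same broadcast input element.
        gradInput0[idx0[i]] += gradOutput[i] * input1[idx1[i]];
        gradInput1[idx1[i]] += gradOutput[i] * input0[idx0[i]];
    }
}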
@@ -77,4 +77,10 @@ void Aidge::AddImpl_cpu::backward() {
getCPUPtr(in0grad),
getCPUPtr(in1grad));
Log::info("AddImpl_cpu::backward() : Gradient of output 0");
out0grad->print();
Log::info("AddImpl_cpu::backward() : Gradient of input 0");
in0grad->print();
Log::info("AddImpl_cpu::backward() : Gradient of input 1");
in1grad->print();
}
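For context, the addition backward that these dumps inspect follows the identity rule: each input receives the output gradient, reduced over any broadcast dimensions. A minimal sketch for the same-shape case, with a hypothetical function name:

#include <cstddef>

// Minimal sketch of the add backward pass when no broadcasting is involved:
// d(in0) = d(out0) and d(in1) = d(out0). With broadcasting, the gradient is
// additionally summed over the broadcast dimensions, as in the Mul sketch above.
void add_backward_sketch(std::size_t length,
                         const float* gradOutput,
                         float* gradInput0,
                         float* gradInput1)
{
    for (std::size_t i = 0; i < length; ++i) {
        gradInput0[i] += gradOutput[i];
        gradInput1[i] += gradOutput[i];
    }
}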
@@ -55,7 +55,7 @@ void Aidge::FCImpl_cpu::forward()
template <>
void Aidge::FCImpl_cpu::backward()
{
Log::notice("\t Call to Aidge::FCIMpl_cpu::backward()");
Log::info("\t Call to Aidge::FCIMpl_cpu::backward()");
const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
const auto& fc_grad = op_.getOutput(0)->grad();
AIDGE_ASSERT(fc_grad, "missing output #0 gradient");
@@ -63,7 +63,6 @@ void Aidge::FCImpl_cpu::backward()
AIDGE_ASSERT(op_.getInput(1)->grad(), "missing input #1 gradient");
const auto impl = Registrar<FCImpl_cpu>::create(getBestMatch(getRequiredSpec()));
Log::notice("\t Call to Aidge::FCIMpl_cpu::backward() - Found impl in registrar");
// Convert input data (no overhead if not needed!)
// TODO: right now, if needed, memory will be allocated/deallocated at each
@@ -76,7 +75,6 @@ void Aidge::FCImpl_cpu::backward()
// Call kernel
const auto batchSize = (input0grad.dims().size() > 1) ? input0grad.dims()[0] : 1;
Log::notice("\t Call to Aidge::FCIMpl_cpu::backward() - Retrieved grad pointers");
impl.backward(batchSize,
input1grad.dims()[1], // nb input features
input1grad.dims()[0], // nb output features
......
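The dimension bookkeeping above relies on input 1 being the weight gradient with shape [outFeatures, inFeatures], so dims()[0] is the output feature count and dims()[1] the input feature count, while the batch size is the leading dimension of the input gradient (falling back to 1 for a flat input). A small illustration with made-up shapes:

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical illustration of the shape bookkeeping done before the backward
// kernel call; the dimension values are invented for the example.
int main() {
    std::vector<std::size_t> input0GradDims = {8, 32};  // [batch, inFeatures]
    std::vector<std::size_t> weightGradDims = {4, 32};  // [outFeatures, inFeatures]

    const std::size_t batchSize = (input0GradDims.size() > 1) ? input0GradDims[0] : 1; // 8
    const std::size_t inputFeatureSize  = weightGradDims[1];                           // 32
    const std::size_t outputFeatureSize = weightGradDims[0];                           // 4

    // These three values would be the first arguments of impl.backward(...).
    std::printf("batch=%zu inFeatures=%zu outFeatures=%zu\n",
                batchSize, inputFeatureSize, outputFeatureSize);
    return 0;
}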
@@ -52,6 +52,10 @@ void Aidge::MulImpl_cpu::backward() {
// Find the correct kernel type
const auto impl = Registrar<MulImpl_cpu>::create(getBestMatch(getRequiredSpec()));
Log::info("Debug printing in 1 : ");
in1->print();
Log::info("Debug printing in 0 : ");
in0->print();
// Call kernel
impl.backward(/* input0Length */ in0grad->size(),
@@ -59,10 +63,10 @@ void Aidge::MulImpl_cpu::backward() {
/* grad0Length */ out0grad->size(),
/* input0Dims */ in0->dims(),
/* input1Dims */ in1->dims(),
out0grad->dims(),
getCPUPtr(in0),
getCPUPtr(in1),
getCPUPtr(out0grad),
/* outputDims */ out0grad->dims(),
/* input0_ */ getCPUPtr(in0),
/* input1_ */ getCPUPtr(in1),
/* grad_output_ */ getCPUPtr(out0grad),
getCPUPtr(in0grad),
getCPUPtr(in1grad));
......
@@ -47,6 +47,7 @@ void Aidge::SubImpl_cpu::backward() {
auto in0 = op_.getInput(0);
auto in1 = op_.getInput(1);
auto out0 = op_.getOutput(0);
auto in0grad = op_.getInput(0)->grad();
auto in1grad = op_.getInput(1)->grad();
auto out0grad = op_.getOutput(0)->grad();
@@ -58,6 +59,9 @@ void Aidge::SubImpl_cpu::backward() {
in0grad->print();
in1grad->print();
out0grad->print();
Log::info("Sub Impl, pointer of out0 : {}" , static_cast<void*>(out0.get()));
out0->print();
// Call kernel
impl.backward(/* input0Length */ in0grad->size(),
......
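The tensor and pointer dumps above sit just before the subtraction backward kernel; the rule it implements is d(in0 - in1)/d(in0) = +1 and d(in0 - in1)/d(in1) = -1. A minimal same-shape sketch with a hypothetical function name:

#include <cstddef>

// Minimal sketch of the subtraction backward rule for out0 = in0 - in1
// (same-shape case; broadcast dimensions would additionally be reduced).
void sub_backward_sketch(std::size_t length,
                         const float* gradOutput,
                         float* gradInput0,
                         float* gradInput1)
{
    for (std::size_t i = 0; i < length; ++i) {
        gradInput0[i] += gradOutput[i]; // d(in0 - in1)/d(in0) = +1
        gradInput1[i] -= gradOutput[i]; // d(in0 - in1)/d(in1) = -1
    }
}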
@@ -858,7 +858,7 @@ TEST_CASE("[cpu/operator] MetaOperator", "[Leaky][CPU][Simple]") {
constexpr auto inChannels = 4;
constexpr auto outChannels = 4;
constexpr auto beta = 1.0;
constexpr auto beta = 0.8;
constexpr auto threshold = 1.0;
constexpr auto nbTimeSteps = 2;
@@ -892,6 +892,7 @@ TEST_CASE("[cpu/operator] MetaOperator", "[Leaky][CPU][Simple]") {
auto lif1 = Leaky(nbTimeSteps, beta, threshold, LeakyReset::Subtraction, "leaky");
auto lif1Op = std::static_pointer_cast<OperatorTensor>(lif1->getOperator());
auto fc1Op = std::static_pointer_cast<OperatorTensor>(fc1->getOperator());
//fc1Op->associateInput(0, input);
@@ -913,6 +914,17 @@ TEST_CASE("[cpu/operator] MetaOperator", "[Leaky][CPU][Simple]") {
graph->compile("cpu", DataType::Float32);
graph->forwardDims();
Log::info("Lif 1 op tensors : \
\n Input 0 : {} \
\n Input 1 : {} \
\n Output 0 : {} \
\n Output 1 : {}",
static_cast<void*>(lif1Op->getInput(0).get()),
static_cast<void*>(lif1Op->getInput(1).get()),
static_cast<void*>(lif1Op->getOutput(0).get()),
static_cast<void*>(lif1Op->getOutput(1).get())
);
auto scheduler = SequentialScheduler(graph);
REQUIRE_NOTHROW(scheduler.generateScheduling());
REQUIRE_NOTHROW(scheduler.forward(true));
......
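The test parameters (beta, threshold, LeakyReset::Subtraction, nbTimeSteps) describe a leaky integrate-and-fire recurrence with subtraction reset: the membrane potential decays by beta, integrates the FC output, spikes when it reaches the threshold, and is then reduced by the threshold rather than zeroed. Below is a standalone sketch of that recurrence as I read it, not the Aidge Leaky meta-operator implementation:

#include <cstddef>
#include <vector>

// Hedged sketch of one leaky integrate-and-fire time step with subtraction
// reset, matching the test parameters (beta = 0.8, threshold = 1.0, 2 steps).
// Illustrative recurrence only; the exact reset timing in Aidge may differ.
void leaky_subtraction_reset_step(const std::vector<float>& input,  // weighted input I[t]
                                  std::vector<float>& membrane,     // U, carried across steps
                                  std::vector<float>& spikes,       // S, 0 or 1
                                  float beta, float threshold)
{
    for (std::size_t i = 0; i < input.size(); ++i) {
        // Leaky integration: decay the previous potential and add the new input.
        membrane[i] = beta * membrane[i] + input[i];
        // Spike when the potential reaches the threshold.
        spikes[i] = (membrane[i] >= threshold) ? 1.0f : 0.0f;
        // Subtraction reset: remove the threshold instead of zeroing the potential.
        membrane[i] -= spikes[i] * threshold;
    }
}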