Commit 5cf9facb authored by Olivier BICHLER

Fixed backward for Sqrt and LeakyReLU

parent 05f02dd3
1 merge request: !166 Update 0.5.0 -> 0.6.0
Pipeline #70260 passed
@@ -32,6 +32,7 @@ using LeakyReLUImpl_cpu = OperatorImpl_cpu<LeakyReLU_Op,
     void(const float,
         std::size_t,
         const void*,
+        const void*,
         void*)>;
 // Implementation entry point registration to Operator
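For context, `OperatorImpl_cpu` is parameterized by the operator and by its kernel function signatures (the Sqrt registration below shows the forward and backward types side by side), so adding the gradient argument to the kernel means this registered backward type must gain a `const void*` in lockstep. A simplified, hypothetical model of that pattern; `SimpleOperatorImpl`, `LeakyReLU_Op_Stub` and `bwdKernel` are illustrative stand-ins, not Aidge code:

```cpp
#include <cstddef>

// Hypothetical stand-in: an impl type parameterized by the operator and by
// the exact function signatures of its forward and backward kernels.
template <class Op, class FwdSig, class BwdSig>
struct SimpleOperatorImpl {
    FwdSig* forward = nullptr;   // pointer to a function with the forward type
    BwdSig* backward = nullptr;  // pointer to a function with the backward type
};

struct LeakyReLU_Op_Stub {};

// Backward now carries (negativeSlope, length, input, grad_output, grad_input).
using Impl = SimpleOperatorImpl<LeakyReLU_Op_Stub,
    void(const float, std::size_t, const void*, void*),
    void(const float, std::size_t, const void*, const void*, void*)>;

void bwdKernel(const float, std::size_t, const void*, const void*, void*) {}

int main() {
    Impl impl;
    impl.backward = &bwdKernel;  // compiles only because the types match exactly;
                                 // the old four-argument kernel would be rejected
    (void)impl;
    return 0;
}
```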
@@ -36,14 +36,16 @@ template <class I, class O>
 void LeakyReLUImpl_cpu_backward_kernel(const float negativeSlope_,
                                        std::size_t inputLength,
                                        const void* input_,
-                                       void* output_) {
+                                       const void* grad_output_,
+                                       void* grad_input_) {
-    const I* input = static_cast<const I*>(input_);
-    O* output = static_cast<O*>(output_);
+    const O* input = static_cast<const O*>(input_);
+    const I* grad_output = static_cast<const I*>(grad_output_);
+    O* grad_input = static_cast<O*>(grad_input_);
     const I negativeSlope = static_cast<const I>(negativeSlope_);
     for (std::size_t i = 0; i < inputLength; ++i) {
-        output[i] = (input[i] > 0) ? input[i] : negativeSlope*input[i];
+        grad_input[i] = (input[i] > 0) ? grad_output[i] : negativeSlope*grad_output[i];
     }
 }
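The bug being fixed here: the old kernel body recomputed the forward pass (`output[i] = (input[i] > 0) ? input[i] : negativeSlope*input[i]`) instead of propagating the gradient. Since y = x for x > 0 and y = negativeSlope * x otherwise, the chain rule gives grad_input = grad_output on the positive side and negativeSlope * grad_output on the negative side, which is what the new loop computes. A self-contained sanity check of that arithmetic, with made-up values and independent of Aidge:

```cpp
#include <cassert>
#include <cstddef>

// Same arithmetic as the fixed kernel, in plain form.
template <class T>
void leaky_relu_backward(T negativeSlope, std::size_t n,
                         const T* input, const T* grad_output, T* grad_input) {
    for (std::size_t i = 0; i < n; ++i) {
        // dy/dx is 1 where input > 0 and negativeSlope elsewhere.
        grad_input[i] = (input[i] > 0) ? grad_output[i]
                                       : negativeSlope * grad_output[i];
    }
}

int main() {
    const float x[4]  = {-2.0f, -0.5f, 0.5f, 2.0f};  // forward inputs
    const float gy[4] = { 1.0f,  1.0f, 1.0f, 1.0f};  // upstream gradient
    float gx[4];
    leaky_relu_backward(0.1f, 4u, x, gy, gx);
    assert(gx[0] == 0.1f && gx[1] == 0.1f);  // negative side scales by the slope
    assert(gx[2] == 1.0f && gx[3] == 1.0f);  // positive side passes through
    return 0;
}
```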
@@ -26,7 +26,7 @@ namespace Aidge {
 // Operator implementation entry point for the backend
 using SqrtImpl_cpu = OperatorImpl_cpu<Sqrt_Op,
     void(const std::size_t, const void*, void*),
-    void(const std::size_t, const void*, void*)>;
+    void(const std::size_t, const void*, const void*, void*)>;
 // Implementation entry point registration to Operator
 REGISTRAR(Sqrt_Op, "cpu", Aidge::SqrtImpl_cpu::create);
@@ -35,14 +35,16 @@ void SqrtImpl_cpu_forward_kernel(const std::size_t inputLength,
 template <class I, class O>
 void SqrtImpl_cpu_backward_kernel(const std::size_t inputLength,
-                                  const void* input_,
-                                  void* output_) {
+                                  const void* output_,
+                                  const void* grad_output_,
+                                  void* grad_input_) {
-    const I* input = static_cast<const I*>(input_);
-    O* output = static_cast<O*>(output_);
+    const I* output = static_cast<const I*>(output_);
+    const I* grad_output = static_cast<const I*>(grad_output_);
+    O* grad_input = static_cast<O*>(grad_input_);
     for (std::size_t i = 0; i < inputLength; ++i) {
-        output[i] = static_cast<O>(0.5/(std::sqrt(static_cast<float>(input[i]))));
+        grad_input[i] = static_cast<O>(0.5/output[i]) * grad_output[i];
     }
 }
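The math behind the new body: for y = sqrt(x), dy/dx = 1/(2*sqrt(x)) = 0.5/y, so grad_input = (0.5/output) * grad_output. That is why the kernel now receives the forward output rather than the input: the already-computed square root is reused instead of recomputed, and the old body only ever wrote the local derivative into `output` without multiplying by the incoming gradient. A standalone check of the identity, with illustrative values that are not part of the commit:

```cpp
#include <cassert>
#include <cmath>
#include <cstddef>

// Same arithmetic as the fixed kernel: grad_x = (0.5 / y) * grad_y, with y = sqrt(x).
void sqrt_backward(std::size_t n, const float* output,
                   const float* grad_output, float* grad_input) {
    for (std::size_t i = 0; i < n; ++i) {
        grad_input[i] = 0.5f / output[i] * grad_output[i];
    }
}

int main() {
    const float x[3] = {1.0f, 4.0f, 9.0f};
    float y[3], gy[3] = {1.0f, 1.0f, 1.0f}, gx[3];
    for (int i = 0; i < 3; ++i) y[i] = std::sqrt(x[i]);  // forward pass
    sqrt_backward(3, y, gy, gx);
    // d sqrt(x)/dx = 1/(2*sqrt(x)): expected 0.5, 0.25 and 1/6.
    assert(std::fabs(gx[0] - 0.5f) < 1e-6f);
    assert(std::fabs(gx[1] - 0.25f) < 1e-6f);
    assert(std::fabs(gx[2] - 1.0f / 6.0f) < 1e-6f);
    return 0;
}
```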
@@ -43,8 +43,9 @@ template <>
 void Aidge::LeakyReLUImpl_cpu::backward() {
     // reversing in and out Data for backprop
     const LeakyReLU_Op& op_ = dynamic_cast<const LeakyReLU_Op&>(mOp);
-    std::shared_ptr<Tensor> in0 = op_.getOutput(0)->grad();
-    std::shared_ptr<Tensor> out0 = op_.getInput(0)->grad();
+    std::shared_ptr<Tensor> in0 = op_.getInput(0);
+    std::shared_ptr<Tensor> out0grad = op_.getOutput(0)->grad();
+    std::shared_ptr<Tensor> in0grad = op_.getInput(0)->grad();
     AIDGE_ASSERT(in0, "missing input #0");
     // Find the correct kernel type
@@ -52,7 +53,8 @@ void Aidge::LeakyReLUImpl_cpu::backward() {
     // Call kernel
     impl.backward(op_.negativeSlope(),
-        in0->size(),
+        out0grad->size(),
         getCPUPtr(in0),
-        getCPUPtr(out0));
+        getCPUPtr(out0grad),
+        getCPUPtr(in0grad));
 }
\ No newline at end of file
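The wiring previously leaned on "reversing in and out Data" and passed only two tensors; after the fix it fetches the forward input (needed for the sign test in the kernel), the output gradient and the input gradient, and sizes the loop from the gradient tensor. A quick way to convince yourself the analytic gradient is right is a central finite-difference check; the following standalone sketch is not Aidge code, and eps and the tolerance are chosen ad hoc:

```cpp
#include <cassert>
#include <cmath>

// Forward value and analytic derivative of LeakyReLU, as in the fixed kernels.
float f(float x, float a)    { return x > 0 ? x : a * x; }
float dfdx(float x, float a) { return x > 0 ? 1.0f : a; }

int main() {
    const float a = 0.1f, eps = 1e-3f;
    for (float x : {-2.0f, -0.5f, 0.5f, 2.0f}) {
        // Central difference approximates dy/dx away from the kink at 0.
        const float numeric = (f(x + eps, a) - f(x - eps, a)) / (2.0f * eps);
        assert(std::fabs(numeric - dfdx(x, a)) < 1e-3f);
    }
    return 0;
}
```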
@@ -40,6 +40,7 @@ template <>
 void Aidge::SqrtImpl_cpu::backward() {
     // reversing in and out Data for backprop
     const Sqrt_Op& op_ = dynamic_cast<const Sqrt_Op&>(mOp);
+    std::shared_ptr<Tensor> out0 = op_.getOutput(0);
     std::shared_ptr<Tensor> out0grad = op_.getOutput(0)->grad();
     std::shared_ptr<Tensor> in0grad = op_.getInput(0)->grad();
     AIDGE_ASSERT(out0grad, "missing output #0");
@@ -49,6 +50,7 @@ void Aidge::SqrtImpl_cpu::backward() {
     // Call kernel
     impl.backward(out0grad->size(),
+        getCPUPtr(out0),
         getCPUPtr(out0grad),
         getCPUPtr(in0grad));
 }
\ No newline at end of file
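Here the substantive change is fetching `out0 = op_.getOutput(0)` so the kernel can reuse the cached forward result instead of recomputing the square root. The same finite-difference technique confirms that 0.5/y matches the numerical derivative of sqrt; again a standalone sketch with arbitrary test points and tolerance:

```cpp
#include <cassert>
#include <cmath>

int main() {
    const float eps = 1e-3f;
    for (float x : {0.25f, 1.0f, 4.0f, 9.0f}) {
        const float y = std::sqrt(x);        // forward result, as cached in out0
        const float analytic = 0.5f / y;     // local derivative used by the kernel
        const float numeric =
            (std::sqrt(x + eps) - std::sqrt(x - eps)) / (2.0f * eps);
        assert(std::fabs(analytic - numeric) < 1e-3f);
    }
    return 0;
}
```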