Commit c0dbb037, authored by Grégoire Kubler, committed by Olivier BICHLER

feat: Added convolution 1D/2D backward kernels

parent 50479099
1 merge request: !142 feat_operator_convtranspose
@@ -13,45 +13,63 @@
 #define AIDGE_CPU_OPERATOR_CONVIMPL_H_

 #include <array>
 #include <memory>
 #include <tuple>
 #include <vector>

 #include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"

 namespace Aidge {
 // Operator implementation entry point for the backend
 using Conv1D_Op = Conv_Op<1>;
 using ConvImpl1D_cpu = OperatorImpl_cpu<Conv_Op<1>,
-    void(const std::array<DimSize_t, 1>&,
-         const std::array<DimSize_t, 1>&,
-         const std::array<DimSize_t, 1>&,
-         const std::array<DimSize_t, 3> &,
-         DimSize_t,
-         const void *,
-         const void *,
-         const void *,
-         void *)>;
+    void(const std::array<DimSize_t, 1> &,
+         const std::array<DimSize_t, 1> &,
+         const std::array<DimSize_t, 1> &,
+         const std::array<DimSize_t, 3> &,
+         DimSize_t,
+         const void *,
+         const void *,
+         const void *,
+         void *),
+    void(const std::array<DimSize_t, 1> &,
+         const std::array<DimSize_t, 1> &,
+         const std::array<DimSize_t, 1> &,
+         const std::array<DimSize_t, 3> &,
+         const std::array<DimSize_t, 3> &,
+         const void *,
+         const void *,
+         const void *,
+         void *,
+         void *,
+         void *)>;

 using Conv2D_Op = Conv_Op<2>;
-using ConvImpl2D_cpu = OperatorImpl_cpu<Conv_Op<2>,
-    void(const std::array<DimSize_t, 2>&,
-         const std::array<DimSize_t, 2>&,
-         const std::array<DimSize_t, 2>&,
-         const std::array<DimSize_t, 4> &,
-         DimSize_t,
-         const void *,
-         const void *,
-         const void *,
-         void *)>;
+using ConvImpl2D_cpu = OperatorImpl_cpu<Conv2D_Op,
+    void(const std::array<DimSize_t, 2> &,
+         const std::array<DimSize_t, 2> &,
+         const std::array<DimSize_t, 2> &,
+         const std::array<DimSize_t, 4> &,
+         DimSize_t,
+         const void *,
+         const void *,
+         const void *,
+         void *),
+    void(const std::array<DimSize_t, 2> &,
+         const std::array<DimSize_t, 2> &,
+         const std::array<DimSize_t, 2> &,
+         const std::array<DimSize_t, 4> &,
+         const std::array<DimSize_t, 4> &,
+         const void *,
+         const void *,
+         const void *,
+         void *,
+         void *,
+         void *)>;

 // Implementation entry point registration to Operator
 REGISTRAR(Conv1D_Op, "cpu", Aidge::ConvImpl1D_cpu::create);
 REGISTRAR(Conv2D_Op, "cpu", Aidge::ConvImpl2D_cpu::create);
 } // namespace Aidge

 #endif /* AIDGE_CPU_OPERATOR_CONVIMPL_H_ */
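The substantive change in this header: each OperatorImpl_cpu alias now carries two kernel signatures instead of one, the existing forward kernel (only reformatted) plus a new backward kernel. Reading the backward signature against the impl.backward(...) call sites in the implementation file below, the parameters can be annotated as in the following sketch; the prototype, the parameter names, and the DimSize_t stand-in are illustrative assumptions, not declarations from the repository.

#include <array>
#include <cstddef>

using DimSize_t = std::size_t; // stand-in for the alias from aidge/utils/Types.h

// Hypothetical prototype matching the new 1D backward kernel signature;
// the 2D variant is identical with 2- and 4-element arrays.
void conv1DBackwardKernel(
    const std::array<DimSize_t, 1> &strideDims,   // stride per spatial dim
    const std::array<DimSize_t, 1> &dilationDims, // dilation per spatial dim
    const std::array<DimSize_t, 1> &kernelDims,   // kernel spatial size
    const std::array<DimSize_t, 3> &inputDims,    // forward input dims {N, C_in, W}
    const std::array<DimSize_t, 3> &outputDims,   // forward output dims {N, C_out, W_out}
    const void *input,       // forward input data
    const void *weights,     // forward weights
    const void *outputGrad,  // gradient w.r.t. output #0
    void *inputGrad,         // written: gradient w.r.t. data input #0
    void *weightGrad,        // written: gradient w.r.t. weight input #1
    void *biasGrad);         // written: gradient w.r.t. bias input #2, nullptr if absent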
@@ -22,6 +22,8 @@
 #include "aidge/operator/Conv.hpp"
 #include "aidge/utils/Types.h"

 namespace Aidge {

 template <>
 void Aidge::ConvImpl1D_cpu::forward() {
     const auto& op_ = static_cast<const Conv_Op<1>&>(mOp);
@@ -55,9 +57,47 @@ void Aidge::ConvImpl1D_cpu::forward() {
     );
 }

-template <>
-void Aidge::ConvImpl1D_cpu::backward() {
-    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for Conv_Op<1> on backend cpu");
-}
+template <> void ConvImpl1D_cpu::backward() {
+    const auto &op = dynamic_cast<const Conv1D_Op &>(mOp);
+    const auto &outputGrad = op.getOutput(0)->grad();
+    AIDGE_ASSERT(outputGrad, "{}: missing output #0 gradient", op.type());
+    AIDGE_ASSERT(op.getInput(0)->grad(),
+                 "{}: missing data input (#0) gradient",
+                 op.type());
+    AIDGE_ASSERT(op.getInput(1)->grad(),
+                 "{}: missing weight input (#1) gradient",
+                 op.type());
+
+    std::shared_ptr<Tensor> inputDataGradFallback, inputWeightGradFallback,
+        inputBiasGradFallback;
+    const auto &inputDataGrad =
+        op.getInput(0)->grad()->refCastFrom(inputDataGradFallback,
+                                            *(op.getOutput(0)));
+    const auto &inputWeightGrad =
+        op.getInput(1)->grad()->refCastFrom(inputWeightGradFallback,
+                                            *(op.getOutput(0)));
+    const auto &inputBiasGrad =
+        (op.getInput(2) && op.getInput(2)->grad())
+            ? op.getInput(2)->grad()->refCastFrom(inputBiasGradFallback,
+                                                  *(op.getOutput(0)))
+            : Tensor();
+
+    // Call kernel
+    const auto impl =
+        Registrar<ConvImpl1D_cpu>::create(getBestMatch(getRequiredSpec()));
+    impl.backward(op.strideDims(),
+                  op.dilationDims(),
+                  op.kernelDims(),
+                  op.getInput(0)->template dims<3>(),
+                  op.getOutput(0)->template dims<3>(),
+                  getCPUPtr(op.getInput(0)),
+                  getCPUPtr(op.getInput(1)),
+                  getCPUPtr(outputGrad),
+                  inputDataGrad.getImpl()->rawPtr(),
+                  inputWeightGrad.getImpl()->rawPtr(),
+                  op.getInput(2) ? inputBiasGrad.getImpl()->rawPtr() : nullptr);
+}
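What the backward kernel must compute is fixed by the convolution math, even though this commit only shows the dispatch code above. The refCastFrom() calls make each gradient tensor available with the data type and backend of output #0, copying into the fallback tensor only when a conversion is actually needed, so the kernel receives raw pointers it can use directly. Below is a minimal float-only sketch of the math, assuming NCW layout, no padding (the signature carries none), caller-zeroed gradient buffers, and a hypothetical function name; it is not the repository's kernel.

#include <array>
#include <cstddef>

// Naive reference: walk every output-gradient element once and scatter its
// contributions into the bias, weight, and input gradients.
void conv1dBackwardRef(const std::array<std::size_t, 1> &stride,
                       const std::array<std::size_t, 1> &dilation,
                       const std::array<std::size_t, 1> &kernel,
                       const std::array<std::size_t, 3> &inDims,   // {N, C_in, W}
                       const std::array<std::size_t, 3> &outDims,  // {N, C_out, W_out}
                       const float *input, const float *weights,
                       const float *outGrad,
                       float *inGrad, float *wGrad, float *bGrad) {
    const std::size_t N = inDims[0], Cin = inDims[1], W = inDims[2];
    const std::size_t Cout = outDims[1], Wout = outDims[2];
    for (std::size_t n = 0; n < N; ++n)
        for (std::size_t oc = 0; oc < Cout; ++oc)
            for (std::size_t ow = 0; ow < Wout; ++ow) {
                const float go = outGrad[(n * Cout + oc) * Wout + ow];
                if (bGrad) { bGrad[oc] += go; } // bias (input #2) is optional
                for (std::size_t ic = 0; ic < Cin; ++ic)
                    for (std::size_t k = 0; k < kernel[0]; ++k) {
                        // iw is the input position that contributed to output ow
                        const std::size_t iw = ow * stride[0] + k * dilation[0];
                        const std::size_t iIdx = (n * Cin + ic) * W + iw;
                        const std::size_t wIdx = (oc * Cin + ic) * kernel[0] + k;
                        wGrad[wIdx] += go * input[iIdx];    // dL/dW
                        inGrad[iIdx] += go * weights[wIdx]; // dL/dX
                    }
            }
}

The 2D case adds one more spatial loop per dimension but follows the same scatter pattern.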
template <>
@@ -93,7 +133,48 @@ void Aidge::ConvImpl2D_cpu::forward() {
     );
 }

-template <>
-void Aidge::ConvImpl2D_cpu::backward() {
-    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for Conv_Op<2> on backend cpu");
-}
+template <> void ConvImpl2D_cpu::backward() {
+    const auto &op = dynamic_cast<const Conv2D_Op &>(mOp);
+    const auto &outputGrad = op.getOutput(0)->grad();
+    AIDGE_ASSERT(outputGrad, "{}: missing output #0 gradient", op.type());
+    AIDGE_ASSERT(op.getInput(0)->grad(),
+                 "{}: missing data input (#0) gradient",
+                 op.type());
+    AIDGE_ASSERT(op.getInput(1)->grad(),
+                 "{}: missing weight input (#1) gradient",
+                 op.type());
+
+    std::shared_ptr<Tensor> inputDataGradFallback, inputWeightGradFallback,
+        inputBiasGradFallback;
+    const auto &inputDataGrad =
+        op.getInput(0)->grad()->refCastFrom(inputDataGradFallback,
+                                            *(op.getOutput(0)));
+    const auto &inputWeightGrad =
+        op.getInput(1)->grad()->refCastFrom(inputWeightGradFallback,
+                                            *(op.getOutput(0)));
+    const auto &inputBiasGrad =
+        (op.getInput(2) && op.getInput(2)->grad())
+            ? op.getInput(2)->grad()->refCastFrom(inputBiasGradFallback,
+                                                  *(op.getOutput(0)))
+            : Tensor();
+
+    // Call kernel
+    const auto impl =
+        Registrar<ConvImpl2D_cpu>::create(getBestMatch(getRequiredSpec()));
+    impl.backward(op.strideDims(),
+                  op.dilationDims(),
+                  op.kernelDims(),
+                  op.getInput(0)->template dims<4>(),
+                  op.getOutput(0)->template dims<4>(),
+                  getCPUPtr(op.getInput(0)),
+                  getCPUPtr(op.getInput(1)),
+                  getCPUPtr(outputGrad),
+                  inputDataGrad.getImpl()->rawPtr(),
+                  inputWeightGrad.getImpl()->rawPtr(),
+                  op.getInput(2) ? inputBiasGrad.getImpl()->rawPtr() : nullptr);
+}
} // namespace Aidge
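Both backward entry points hand the kernel the input and output shapes (dims<3>() and dims<4>()) rather than letting it recompute them. For an unpadded, dilated convolution the two are tied per spatial dimension by out = (in - dilation * (kernel - 1) - 1) / stride + 1, with integer division. A small helper with an assumed name makes the relationship concrete for the 2D case:

#include <array>
#include <cstddef>

// Output shape of an unpadded 2D convolution in NCHW layout.
std::array<std::size_t, 4> conv2dOutDims(const std::array<std::size_t, 4> &inDims, // {N, C_in, H, W}
                                         std::size_t outChannels,
                                         const std::array<std::size_t, 2> &kernel,
                                         const std::array<std::size_t, 2> &stride,
                                         const std::array<std::size_t, 2> &dilation) {
    std::array<std::size_t, 4> out{inDims[0], outChannels, 0, 0};
    for (std::size_t i = 0; i < 2; ++i) {
        // Effective kernel extent grows with dilation; integer division floors.
        out[2 + i] = (inDims[2 + i] - dilation[i] * (kernel[i] - 1) - 1) / stride[i] + 1;
    }
    return out;
}

For example, a 3x3 kernel with stride 1 and dilation 1 maps a 224x224 input to 222x222, so the backward kernel can rely on receiving exactly this pair of shapes.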