/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/ReLU.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/backend/cpu/operator/ReLUImpl.hpp"
#include "aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp"
#include "aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp"
Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
    // this implementation can be in-place
    return 0;
}
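
// Forward pass: look up the kernel registered for the output data type and
// apply ReLU elementwise, out[i] = max(in[i], 0).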
void Aidge::ReLUImpl_cpu::forward() {
    std::shared_ptr<Tensor> in0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0));
    AIDGE_ASSERT(in0, "missing input #0");

    // Find the correct kernel type
    auto kernelFunc = Registrar<ReLUImplForward_cpu>::create({
        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});

    // Call kernel
    kernelFunc(in0->size(),
               getCPUPtr(mOp.getRawInput(0)),
               getCPUPtr(mOp.getRawOutput(0)));
}
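
// Backward pass: gradients flow from the output back to the input, so the
// tensors are deliberately "reversed" below: in0 holds the gradient w.r.t.
// output #0 and out0 receives the gradient w.r.t. input #0. The registered
// kernel (see ReLUImpl_backward_kernels.hpp) performs the elementwise
// computation.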
void Aidge::ReLUImpl_cpu::backward() {
    // reversing in and out Tensors
    const ReLU_Op& op_ = dynamic_cast<const ReLU_Op&>(mOp);
    std::shared_ptr<Tensor> in0 = op_.getOutput(0)->grad();
    std::shared_ptr<Tensor> out0 = op_.getInput(0)->grad();
    AIDGE_ASSERT(in0, "current {} operator output #0 has no gradient Tensor.", op_.type());
    AIDGE_ASSERT(out0, "current {} operator input #0 has no gradient Tensor.", op_.type());

    // Find the correct kernel type
    auto kernelFunc = Registrar<ReLUImplBackward_cpu>::create({
        in0->dataType(),
        out0->dataType()
    });

    // Call kernel
    kernelFunc(in0->size(), getCPUPtr(in0), getCPUPtr(out0));
}