Commit b9fd7b57 authored by Olivier Antoni (parent 3988b2f4)

Add log for BCE loss function

Merge requests: !73 (version 0.2.3), !68 (Add log operator for BCE loss function)
Pipeline #48257 failed
aidge/backend/cpu/operator/LnImpl.hpp:

/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_LNIMPL_H_
#define AIDGE_CPU_OPERATOR_LNIMPL_H_
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Ln.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include <memory>
#include <vector>
namespace Aidge {
// class Ln_Op;

// compute kernel registry for forward and backward
class LnImplForward_cpu
    : public Registrable<LnImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
};
class LnImplBackward_cpu
    : public Registrable<LnImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
};

class LnImpl_cpu : public OperatorImpl {
public:
    LnImpl_cpu(const Ln_Op& op) : OperatorImpl(op, "cpu") {}

    static std::unique_ptr<LnImpl_cpu> create(const Ln_Op& op) {
        return std::make_unique<LnImpl_cpu>(op);
    }

    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
    void forward() override final;
    void backward() override final;
};

namespace {
static Registrar<Ln_Op> registrarLnImpl_cpu("cpu", Aidge::LnImpl_cpu::create);
}
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_LNIMPL_H_ */
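A word on the Registrable/Registrar pattern above: kernels are registered under a tuple of DataType keys and resolved at run time, exactly as LnImpl_cpu::forward() does further below. A minimal dispatch sketch, not part of the commit (the function name lnDispatchSketch is hypothetical; it assumes LnImpl_forward_kernels.hpp, shown further down, is included so the Float32 kernel is registered):

#include <cstddef>
#include <vector>

#include "aidge/backend/cpu/operator/LnImpl_forward_kernels.hpp"

// Sketch: resolve a forward kernel by its {input type, output type} key
// and apply it to a small buffer.
void lnDispatchSketch() {
    std::vector<float> in{1.0f, 2.0f, 4.0f};
    std::vector<float> out(in.size());
    auto kernel = Aidge::Registrar<Aidge::LnImplForward_cpu>::create(
        {Aidge::DataType::Float32, Aidge::DataType::Float32});
    kernel(in.size(), in.data(), out.data()); // out = {0, ln 2, ln 4}
}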
aidge/backend/cpu/operator/LnImpl_backward_kernels.hpp:

/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_LNIMPL_BACKWARD_KERNEL_H_
#define AIDGE_CPU_OPERATOR_LNIMPL_BACKWARD_KERNEL_H_
#include <cstddef> // std::size_t
#include "aidge/backend/cpu/operator/LnImpl.hpp"
#include "aidge/utils/Registrar.hpp"
namespace Aidge {
// Backward kernel: for y = ln(x), dy/dx = 1/x, so each input gradient is the
// corresponding output gradient divided by the input value (chain rule).
template <class I, class GI, class GO>
void LnImpl_cpu_backward_kernel(const std::size_t inputLength,
                                const void* input_, const void* grad_output_,
                                void* grad_input_) {
    const I* input = static_cast<const I*>(input_);
    const GO* grad_output = static_cast<const GO*>(grad_output_);
    GI* grad_input = static_cast<GI*>(grad_input_);
    for (std::size_t i = 0; i < inputLength; ++i) {
        grad_input[i] = grad_output[i] / input[i];
    }
}
namespace {
static Registrar<LnImplBackward_cpu> registrarLnImplBackward_cpu_Float32(
{DataType::Float32, DataType::Float32, DataType::Float32},
Aidge::LnImpl_cpu_backward_kernel<float, float, float>);
static Registrar<LnImplBackward_cpu> registrarLnImplBackward_cpu_Float64(
{DataType::Float64, DataType::Float64, DataType::Float64},
Aidge::LnImpl_cpu_backward_kernel<double, double, double>);
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_LNIMPL_BACKWARD_KERNEL_H_ */
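Since d/dx ln(x) = 1/x, the backward kernel can be sanity-checked against a central finite difference. A standalone sketch, not part of the commit (the function name lnBackwardCheckSketch is hypothetical), calling the kernel template directly:

#include <cassert>
#include <cmath>
#include <cstddef>

#include "aidge/backend/cpu/operator/LnImpl_backward_kernels.hpp"

// Sketch: check grad_input = grad_output / input against a central
// finite difference of ln(x).
void lnBackwardCheckSketch() {
    const std::size_t n = 3;
    const float x[n] = {0.5f, 1.0f, 4.0f};
    const float gout[n] = {1.0f, 1.0f, 1.0f}; // upstream gradient of ones
    float gin[n];
    Aidge::LnImpl_cpu_backward_kernel<float, float, float>(n, x, gout, gin);

    const float eps = 1e-3f;
    for (std::size_t i = 0; i < n; ++i) {
        // Central difference: (ln(x+eps) - ln(x-eps)) / (2*eps) ~= 1/x
        const float fd = (std::log(x[i] + eps) - std::log(x[i] - eps)) / (2.0f * eps);
        assert(std::abs(gin[i] - fd) < 1e-2f);
    }
}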
aidge/backend/cpu/operator/LnImpl_forward_kernels.hpp:

/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_LNIMPL_FORWARD_KERNEL_H_
#define AIDGE_CPU_OPERATOR_LNIMPL_FORWARD_KERNEL_H_
#include "aidge/utils/Registrar.hpp"
#include "aidge/backend/cpu/operator/LnImpl.hpp"
namespace Aidge {
template <class I, class O>
void LnImpl_cpu_forward_kernel(std::size_t inputLenght,
const void* input_,
void* output_) {
const I* input = static_cast<const I*>(input_);
O* output = static_cast<O*>(output_);
//#pragma omp parallel for if (inputLenght > 1024)
for (std::size_t i = 0; i < inputLenght; ++i) {
output[i] = std::log(input[i]);
}
}
namespace {
static Registrar<LnImplForward_cpu> registrarLnImplForward_cpu_Float32(
{DataType::Float32, DataType::Float32}, Aidge::LnImpl_cpu_forward_kernel<float, float>);
static Registrar<LnImplForward_cpu> registrarLnImplForward_cpu_Float64(
{DataType::Float64, DataType::Float64}, Aidge::LnImpl_cpu_forward_kernel<double, double>);
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_LNIMPL_FORWARD_KERNEL_H_ */
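The forward kernel is a plain function template, so it can also be exercised directly. Because getNbRequiredProtected() returns 0 (see LnImpl.cpp below), the operator may run in place, with the output buffer aliasing the input. A sketch, not part of the commit (lnForwardSketch is hypothetical), illustrating both, along with the domain caveat for non-positive inputs:

#include "aidge/backend/cpu/operator/LnImpl_forward_kernels.hpp"

// Sketch: direct and in-place use of the forward kernel.
void lnForwardSketch() {
    float buf[3] = {1.0f, 2.718281828f, 4.0f};
    // In-place: the output pointer aliases the input pointer, which is safe
    // here because the kernel reads input[i] before writing output[i].
    Aidge::LnImpl_cpu_forward_kernel<float, float>(3, buf, buf);
    // buf is now {0.0f, ~1.0f, ln 4}. Note: std::log(0) yields -inf and
    // std::log of a negative value yields NaN; callers must keep inputs positive.
}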
LnImpl.cpp:

/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <memory> // std::shared_ptr

#include "aidge/operator/Ln.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/backend/cpu/operator/LnImpl.hpp"
#include "aidge/backend/cpu/operator/LnImpl_forward_kernels.hpp"
#include "aidge/backend/cpu/operator/LnImpl_backward_kernels.hpp"
Aidge::Elts_t Aidge::LnImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
    // this implementation can be in-place
    return Elts_t::DataElts(0);
}

void Aidge::LnImpl_cpu::forward() {
    const Ln_Op& op_ = static_cast<const Ln_Op&>(mOp);
    std::shared_ptr<Tensor> in0 = op_.getInput(0);
    std::shared_ptr<Tensor> out0 = op_.getOutput(0);
    AIDGE_ASSERT(in0, "missing input #0");

    // Find the correct kernel type
    auto kernelFunc = Registrar<LnImplForward_cpu>::create({
        in0->dataType(),
        out0->dataType()});

    // Call kernel
    kernelFunc(in0->size(),
               getCPUPtr(mOp.getRawInput(0)),
               getCPUPtr(mOp.getRawOutput(0)));
}
void Aidge::LnImpl_cpu::backward() {
    const Ln_Op& op_ = dynamic_cast<const Ln_Op&>(mOp);
    std::shared_ptr<Tensor> in0 = op_.getInput(0);
    std::shared_ptr<Tensor> out0 = op_.getOutput(0);
    AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type());

    // Fetch the gradient tensors only after the null check above.
    std::shared_ptr<Tensor> gradIn0 = in0->grad();
    std::shared_ptr<Tensor> gradOut0 = out0->grad();

    // Find the correct kernel type
    auto kernelFunc = Registrar<LnImplBackward_cpu>::create({
        in0->dataType(),
        gradIn0->dataType(),
        gradOut0->dataType()
    });

    // Call kernel
    kernelFunc(gradIn0->size(), getCPUPtr(in0), getCPUPtr(gradOut0), getCPUPtr(gradIn0));
}