Commit 6f45a121 authored by Olivier BICHLER

Merge branch 'BCE_loss' into 'dev'

Add BCE loss function

See merge request !8
parents c744dbc4 7f15cc4f
2 merge requests: !10 version 0.1.2, !8 Add BCE loss function
Pipeline #49004 failed
@@ -31,6 +31,8 @@ namespace loss {
  */
 Tensor MSE(std::shared_ptr<Tensor>& prediction,
            const std::shared_ptr<Tensor>& target);
+Tensor BCE(std::shared_ptr<Tensor>& prediction,
+           const std::shared_ptr<Tensor>& target);
 } // namespace loss
 } // namespace Aidge
...
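For reference, a minimal sketch of how the new entry point is called (not part of the diff; it mirrors the new unit test further down and assumes the "cpu" backend is available):

// Hedged example: dims {4, 2} = [BatchSize, NbChannel] are arbitrary.
std::shared_ptr<Aidge::Tensor> pred = std::make_shared<Aidge::Tensor>(std::vector<std::size_t>{4, 2});
std::shared_ptr<Aidge::Tensor> targ = std::make_shared<Aidge::Tensor>(std::vector<std::size_t>{4, 2});
pred->setBackend("cpu");
targ->setBackend("cpu");
// ... fill both tensors with values in [0, 1] ...
const Aidge::Tensor loss = Aidge::loss::BCE(pred, targ);  // also writes dLoss/dPred into pred->grad()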
@@ -23,5 +23,6 @@ void init_Loss(py::module &m) {
     auto m_loss =
         m.def_submodule("loss", "Submodule dedicated to loss functions");
     m_loss.def("MSE", &loss::MSE, py::arg("graph"), py::arg("target"));
+    m_loss.def("BCE", &loss::BCE, py::arg("graph"), py::arg("target"));
 }
 } // namespace Aidge
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <memory>
#include <numeric>  // std::iota
#include <vector>   // std::vector
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/graph/OpArgs.hpp"
#include "aidge/loss/LossList.hpp"
#include "aidge/recipes/GraphViewHelper.hpp"
#include "aidge/scheduler/Scheduler.hpp"
#include "aidge/scheduler/SequentialScheduler.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/operator/Sub.hpp"
#include "aidge/operator/Mul.hpp"
#include "aidge/operator/Div.hpp"
#include "aidge/operator/Ln.hpp"
#include "aidge/operator/Producer.hpp"    // Producer
#include "aidge/operator/ReduceMean.hpp"
#include "aidge/utils/ArrayHelpers.hpp"   // Array1D
#include "aidge/utils/ErrorHandling.hpp"  // AIDGE_ASSERT
#include "aidge/backend/cpu/operator/AddImpl.hpp"
#include "aidge/backend/cpu/operator/SubImpl.hpp"
#include "aidge/backend/cpu/operator/MulImpl.hpp"
#include "aidge/backend/cpu/operator/DivImpl.hpp"
#include "aidge/backend/cpu/operator/LnImpl.hpp"
#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
Aidge::Tensor Aidge::loss::BCE(std::shared_ptr<Tensor>& prediction,
const std::shared_ptr<Tensor>& target) {
/*
Binary Cross Entropy (BCE) loss function.
Implementation note:
the loss is computed with a graph so as not to be backend-dependent.
*/
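/*
Formula implemented by the graph below, as read off the node wiring
(eps1/eps2 are small offsets guarding ln() and the divisions against 0):
    loss  = -mean[(t + eps1) * ln(p + eps1) + (1 - t + eps2) * ln(1 - p + eps2)]
    dL/dp = -[(t + eps1)/(p + eps1) - (1 - t + eps2)/(1 - p + eps2)] / BatchSize
with p = prediction, t = target; the mean runs over all elements.
*/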
AIDGE_ASSERT(target->dims().size() == 2,
"Label must have two dims: [BatchSize, NbChannel]");
AIDGE_ASSERT(prediction->backend() == target->backend(),
"'prediction' and 'target' Tensors must be on the "
"same backend. Found {} and {}.\n",
prediction->backend(), target->backend());
AIDGE_ASSERT(prediction->dims() == target->dims(),
"'prediction' (shape {}) and 'target' (shape {}) Tensors must "
"have the same dimensions.\n",
prediction->dims(), target->dims());
AIDGE_ASSERT(prediction->dataType() == target->dataType(),
"'prediction' (data type {}) and 'target' (data type {}) "
"Tensors must have the same data type.\n",
prediction->dataType(), target->dataType());
// Small constants keeping ln() and the divisions away from 0
const float eps1 = 1.e-10f;
const float eps2 = 1.e-10f;
// Define nodes: inputs
const std::shared_ptr<Node> prediction_node = Producer(prediction, "pred");
const std::shared_ptr<Node> target_node = Producer(target, "label");
// Define nodes: add1 = prediction + eps1, add2 = target + eps1
const std::shared_ptr<Node> add1_node = Add(2, "add1");
const std::shared_ptr<Node> add2_node = Add(2, "add2");
prediction_node->addChild(add1_node, 0, 0);
Producer(std::make_shared<Tensor>(Array1D<float, 1>{{eps1}}))
->addChild(add1_node, 0, 1);
target_node->addChild(add2_node, 0, 0);
Producer(std::make_shared<Tensor>(Array1D<float, 1>{{eps1}}))
->addChild(add2_node, 0, 1);
// Define nodes: sub1 = 1 - prediction + eps2 and sub2 = - (1 - target + eps2)
const std::shared_ptr<Node> sub1_node = Sub("sub1");
const std::shared_ptr<Node> sub2_node = Sub("sub2");
Producer(std::make_shared<Tensor>(Array1D<float, 1>{{1.0f + eps2}}))
->addChild(sub1_node, 0, 0);
prediction_node->addChild(sub1_node, 0, 1);
target_node->addChild(sub2_node, 0, 0);
Producer(std::make_shared<Tensor>(Array1D<float, 1>{{1.0f + eps2}}))
->addChild(sub2_node, 0, 1);
// Define nodes: ln1 = ln(prediction + eps1) and ln2 = ln(1 - prediction + eps2)
const std::shared_ptr<Node> ln1_node = Ln("ln1");
const std::shared_ptr<Node> ln2_node = Ln("ln2");
add1_node->addChild(ln1_node, 0, 0);
sub1_node->addChild(ln2_node, 0, 0);
// Define nodes: mul1 = (target + eps1) * ln(prediction + eps1) and mul2 = - (1 - target + eps2) * ln(1 - prediction + eps2)
const std::shared_ptr<Node> mul1_node = Mul("mul1");
const std::shared_ptr<Node> mul2_node = Mul("mul2");
add2_node->addChild(mul1_node, 0, 0);
ln1_node->addChild(mul1_node, 0, 1);
sub2_node->addChild(mul2_node, 0, 0);
ln2_node->addChild(mul2_node, 0, 1);
// Define node: sub3 = - [(target + eps1) * ln(prediction + eps1) + (1 - target + eps2) * ln(1 - prediction + eps2)]
const std::shared_ptr<Node> sub3_node = Sub("sub3");
mul2_node->addChild(sub3_node, 0, 0);
mul1_node->addChild(sub3_node, 0, 1);
// Define nodes: div1 = (target + eps1) / (prediction + eps1) and div2 = - (1 - target + eps2)/(1 - prediction + eps2)
const std::shared_ptr<Node> div1_node = Div("div1");
const std::shared_ptr<Node> div2_node = Div("div2");
add2_node->addChild(div1_node, 0, 0);
add1_node->addChild(div1_node, 0, 1);
sub2_node->addChild(div2_node, 0, 0);
sub1_node->addChild(div2_node, 0, 1);
// Define node: add3 = (target + eps1) / (prediction + eps1) - (1 - target + eps2)/(1 - prediction + eps2)
const std::shared_ptr<Node> add3_node = Add(2, "add3");
div1_node->addChild(add3_node, 0, 0);
div2_node->addChild(add3_node, 0, 1);
// Define node: loss = mean of sub3 over all axes (reduces to a single value)
std::vector<int> axes_dims(prediction->nbDims());
std::iota(std::begin(axes_dims), std::end(axes_dims), 0);
auto loss_node = ReduceMean(axes_dims, 1, "loss");
sub3_node->addChild(loss_node, 0, 0);
// Define node: gradient = add3 * (-1 / BatchSize), i.e. dLoss/dPrediction
const std::shared_ptr<Node> gradient_node = Mul("gradient");
add3_node->addChild(gradient_node, 0, 0);
Producer(std::make_shared<Tensor>(Array1D<float, 1>{{-1.0f/float(target->dims()[0])}}))
->addChild(gradient_node, 0, 1);
// Create GraphView
std::shared_ptr<GraphView> gv_loss = std::make_shared<GraphView>("BCE");
gv_loss->add({prediction_node, target_node,
add1_node->getParent(1), add1_node,
add2_node->getParent(1), add2_node,
sub1_node->getParent(0), sub1_node,
sub2_node->getParent(1), sub2_node,
ln1_node, ln2_node, mul1_node, mul2_node, div1_node, div2_node,
sub3_node, loss_node,
add3_node, gradient_node->getParent(1), gradient_node});
gv_loss->compile(prediction->getImpl()->backend(), prediction->dataType());
// Compute loss and gradient
SequentialScheduler ss_loss{gv_loss};
ss_loss.forward(false);
prediction->initGrad(); // Enable gradient for output
std::shared_ptr<Tensor> outputGrad = prediction->grad();
const std::shared_ptr<OperatorTensor> gradient_op = std::dynamic_pointer_cast<OperatorTensor>(gradient_node->getOperator());
outputGrad->copyFrom(gradient_op->getOutput(0)->clone()); // Update gradient
const std::shared_ptr<OperatorTensor> loss_op = std::dynamic_pointer_cast<OperatorTensor>(loss_node->getOperator());
return loss_op->getOutput(0)->clone(); // Return loss
}
@@ -15,6 +15,7 @@
 #include "aidge/backend/cpu/operator/PowImpl.hpp"
 #include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
+#include "aidge/backend/cpu/operator/MulImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/OpArgs.hpp"
@@ -23,6 +24,7 @@
 #include "aidge/operator/Pow.hpp"
 #include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/Sub.hpp"
+#include "aidge/operator/Mul.hpp"
 #include "aidge/recipes/GraphViewHelper.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/scheduler/SequentialScheduler.hpp"
...
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <cstddef>     // std::size_t
#include <cstdint>     // std::uint16_t
#include <cmath>       // std::log
#include <functional>  // std::multiplies, std::plus
#include <iostream>    // std::cout
#include <memory>      // std::make_unique, std::make_shared
#include <numeric>     // std::accumulate
#include <random>      // std::random_device, std::mt19937,
                       // std::uniform_int_distribution, std::uniform_real_distribution
#include <vector>
#include "aidge/loss/LossList.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/TensorUtils.hpp"
namespace Aidge {
TEST_CASE("[loss/classification] BCE", "[loss][classification][BCE]") {
constexpr std::uint16_t NBTRIALS = 10;
// set random variables
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<std::size_t> dimsDist(1, 5);
std::uniform_real_distribution<float> valueDist(0.0f, 1.0f);
for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
const std::size_t nb_dims = 2; // the BCE test fixes nb_dims to 2: [NbBatch, NbChan]
std::vector<std::size_t> dims(2);
for (std::size_t i = 0; i < nb_dims; ++i) { dims[i] = dimsDist(gen); }
const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
// create random predictions
std::unique_ptr<float[]> pred = std::make_unique<float[]>(nb_elements);
for (std::size_t i = 0; i < nb_elements; ++i) {
pred[i] = valueDist(gen);
}
// create random targets
std::unique_ptr<float[]> targ = std::make_unique<float[]>(nb_elements);
for (std::size_t i = 0; i < nb_elements; ++i) {
targ[i] = valueDist(gen);
}
// compute the BCE manually
const float eps1 = 1.0e-10f;
const float eps2 = 1.0e-10f;
std::unique_ptr<float[]> tmp_res_manual = std::make_unique<float[]>(nb_elements);
for (std::size_t i = 0; i < nb_elements; ++i) {
tmp_res_manual[i] = - ((targ[i] + eps1) * std::log(pred[i] + eps1) + (1.0f - targ[i] + eps2) * std::log(1.0f - pred[i] + eps2));
}
std::cout << "Output manual:" << std::endl;
std::shared_ptr<Tensor> tmp_tensor = std::make_shared<Tensor>(dims);
tmp_tensor->setBackend("cpu");
tmp_tensor->getImpl()->setRawPtr(tmp_res_manual.get(), nb_elements);
tmp_tensor->print();
const float res_manual = std::accumulate(&tmp_res_manual[0], &tmp_res_manual[nb_elements], 0.0f, std::plus<float>()) / static_cast<float>(nb_elements);
// compute the BCE using Aidge::loss::BCE function
std::cout << "Input 0 manual:" << std::endl;
std::shared_ptr<Tensor> pred_tensor = std::make_shared<Tensor>(dims);
pred_tensor->setBackend("cpu");
pred_tensor->getImpl()->setRawPtr(pred.get(), nb_elements);
pred_tensor->print();
std::cout << "Input 1 manual:" << std::endl;
std::shared_ptr<Tensor> targ_tensor = std::make_shared<Tensor>(dims);
targ_tensor->setBackend("cpu");
targ_tensor->getImpl()->setRawPtr(targ.get(), nb_elements);
targ_tensor->print();
const Tensor res_function = loss::BCE(pred_tensor, targ_tensor);
// compare results
Tensor res_manual_tensor = Tensor(res_manual);
REQUIRE(approxEq<float>(res_manual_tensor, res_function));
}
}
} // namespace Aidge