Skip to content
Snippets Groups Projects
Commit e1f02f51 authored by Grégoire Kubler's avatar Grégoire Kubler Committed by Cyril Moineau
Browse files

feat : [ADD] Hardmax operator

parent c109674c
No related branches found
No related tags found
2 merge requests!1740.6.1,!167feat_operator_hardmax
Pipeline #72552 passed
......@@ -38,6 +38,7 @@
#include "aidge/backend/cpu/operator/FCImpl.hpp"
#include "aidge/backend/cpu/operator/FoldImpl.hpp"
#include "aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp"
#include "aidge/backend/cpu/operator/HardmaxImpl.hpp"
#include "aidge/backend/cpu/operator/HeavisideImpl.hpp"
#include "aidge/backend/cpu/operator/LRNImpl.hpp"
#include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
......@@ -65,4 +66,4 @@
#include "aidge/backend/cpu/data/TensorImpl.hpp"
#endif /* AIDGE_CPU_IMPORTS_H_ */
\ No newline at end of file
#endif /* AIDGE_CPU_IMPORTS_H_ */
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_HARDMAXIMPL_H_
#define AIDGE_CPU_OPERATOR_HARDMAXIMPL_H_
#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
#include "aidge/operator/Hardmax.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
// Operator implementation entry point for the backend.
// OperatorImpl_cpu template arguments:
//   1st: the Operator the kernels implement (Hardmax_Op)
//   2nd: signature of the forward kernel function
//        (axis, input dims, raw input ptr, raw output ptr)
//   3rd (optional): signature of the backward kernel function
//        (omitted here — no backward kernel is provided yet)
using HardmaxImpl_cpu = OperatorImpl_cpu<
    Hardmax_Op,
    void(std::int32_t, const std::vector<DimSize_t> &, const void *, void *)>;
// Implementation entry point registration to Operator:
// makes this class the "cpu" backend for Hardmax_Op.
REGISTRAR(Hardmax_Op, "cpu", Aidge::HardmaxImpl_cpu::create);
} // namespace Aidge
#endif /* _AIDGE_CPU_OPERATOR_HARDMAXIMPL_H_ */
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_HARDMAXIMPL_FORWARD_KERNEL_H_
#define AIDGE_CPU_OPERATOR_HARDMAXIMPL_FORWARD_KERNEL_H_
#include <cmath>
#include <cstddef>
#include <numeric>
#include "aidge/backend/cpu/operator/HardmaxImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/operator/Hardmax.hpp"
#include "aidge/utils/Registrar.hpp"
namespace Aidge {
// This is the actual implementation of the Hardmax forward kernel
template <class I, class O>
void HardmaxImpl_cpu_forward_kernel(std::int32_t axis_,
const std::vector<DimSize_t> &dims,
const void *input_,
void *output_) {
// We start by casting our arguments
const I *input = static_cast<const I *>(input_);
O *output = static_cast<O *>(output_);
// Cast axis to a size_t
const std::size_t axis = axis_ >= 0 ? axis_ : axis_ + dims.size();
// We fill all the output tensor with 0, we will set to 1 only the max
// element later
std::size_t totalElements =
std::accumulate(dims.cbegin(),
dims.cend(),
std::size_t(1),
std::multiplies<std::size_t>());
std::fill(output, output + totalElements, 0);
std::size_t postAxisStride = 1;
for (std::size_t i = axis + 1; i < dims.size(); ++i) {
postAxisStride *= dims[i];
}
std::size_t preAxisStride = 1;
for (std::size_t i = 0; i < axis; ++i) {
preAxisStride *= dims[i];
}
// For each index on all the axes before and after 'axis', we have a
// different max element to find
for (std::size_t i = 0; i < preAxisStride; ++i) {
for (std::size_t j = 0; j < postAxisStride; ++j) {
// Init the max with first element
std::size_t maxIdx = 0;
I maxVal = input[i * postAxisStride * dims[axis] + j];
// Loop over the elements on 'axis'
for (std::size_t k = 1; k < dims[axis]; ++k) {
I currVal = input[i * postAxisStride * dims[axis] +
k * postAxisStride + j];
// Update max elements
if (currVal > maxVal) {
maxIdx = k;
maxVal = currVal;
}
}
output[i * postAxisStride * dims[axis] + maxIdx * postAxisStride +
j] = 1;
}
}
}
// Registrar declarations for the supported input/output type combinations.
// Each entry supplies: the producer/consumer model, the forward kernel
// instantiation, and the backward kernel (nullptr — backward is not
// implemented for Hardmax on this backend).
REGISTRAR(HardmaxImpl_cpu,
{DataType::Float32},
{ProdConso::defaultModel,
Aidge::HardmaxImpl_cpu_forward_kernel<float, float>,
nullptr});
REGISTRAR(HardmaxImpl_cpu,
{DataType::Int32},
{ProdConso::defaultModel,
Aidge::HardmaxImpl_cpu_forward_kernel<std::int32_t, std::int32_t>,
nullptr});
REGISTRAR(HardmaxImpl_cpu,
{DataType::Float64},
{ProdConso::defaultModel,
Aidge::HardmaxImpl_cpu_forward_kernel<double, double>,
nullptr});
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_HARDMAXIMPL_FORWARD_KERNEL_H_ */
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Hardmax.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/operator/HardmaxImpl.hpp"
#include "aidge/backend/cpu/operator/HardmaxImpl_kernels.hpp"
// This function is a specialization of the operator OperatorImpl_cpu::forward
// function It's purpose is to :
// 1. Retrieve all tensors for the operations, hence ensuring that all
// in/outputs are indeed connected.
// 2. Assert thing that couldn't be checked when creating the operator.
// 3. Retrieve the best operator implementation regarding the input tensor
// types and the operator configuration.
template <> void Aidge::HardmaxImpl_cpu::forward() {
    const Hardmax_Op &op_ = dynamic_cast<const Hardmax_Op &>(mOp);

    // Check that the input is connected. AIDGE_ASSERT (same macro family as
    // AIDGE_THROW_OR_ABORT used in backward() below) stays active in release
    // builds, unlike assert() which is compiled out under NDEBUG and would
    // let a null input reach the raw-pointer dereference further down.
    AIDGE_ASSERT(op_.getInput(0), "missing input #0 for Hardmax operator");

    // Find the best kernel for the current input/output data types.
    const auto impl =
        Registrar<HardmaxImpl_cpu>::create(getBestMatch(getRequiredSpec()));

    // Call kernel
    impl.forward(op_.axis(),
                 op_.getInput(0)->dims(),
                 op_.getInput(0)->getImpl()->rawPtr(),
                 op_.getOutput(0)->getImpl()->rawPtr());
}
// As there is currently no backward kernel for this operator.
// This function is a placeholder.
// No backward kernel is registered for Hardmax on the CPU backend (the
// Registrar entries pass nullptr), so invoking backward() is a usage error:
// fail loudly rather than silently doing nothing.
template <> void Aidge::HardmaxImpl_cpu::backward() {
    // Throws, or aborts when the build disables exceptions.
    AIDGE_THROW_OR_ABORT(
        std::runtime_error,
        "Backward not yet implemented for Hardmax_Op on backend cpu");
}
/********************************************************************************
* Copyright (c) 2024 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include <memory>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Hardmax.hpp"
#include "aidge/backend/cpu/operator/HardmaxImpl.hpp"
#include "aidge/utils/TensorUtils.hpp"
using namespace Aidge;
TEST_CASE("[cpu/operator] Hardmax(forward)", "[Hardmax][CPU]") {
    SECTION("3D Tensor") {
        // 2x3x4 input shared by the two axis sub-cases below.
        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(
            Array3D<float, 2, 3, 4>{{{{1.0, 2.0, 3.0, 4.0},
                                      {8.0, 0.0, 17.0, 1.0},
                                      {5.0, 10.0, 6.0, 0.0}},
                                     {{7.0, 1.0, 9.0, 4.0},
                                      {0.0, 8.0, 4.0, 2.0},
                                      {9.0, 2.0, 0.0, 5.0}}}});
        SECTION("Axis 2") {
            // Expected: one-hot encoding of the argmax along the last axis.
            Tensor expected =
                Tensor(Array3D<float, 2, 3, 4>{{{{0.0, 0.0, 0.0, 1.0},
                                                 {0.0, 0.0, 1.0, 0.0},
                                                 {0.0, 1.0, 0.0, 0.0}},
                                                {{0.0, 0.0, 1.0, 0.0},
                                                 {0.0, 1.0, 0.0, 0.0},
                                                 {1.0, 0.0, 0.0, 0.0}}}});

            // Build the node, wire the input, and run on the CPU backend.
            std::shared_ptr<Node> hardmaxNode = Hardmax(2);
            auto op = std::static_pointer_cast<OperatorTensor>(
                hardmaxNode->getOperator());
            op->associateInput(0, input);
            op->setDataType(DataType::Float32);
            op->setBackend("cpu");
            hardmaxNode->forward();

            REQUIRE(*(op->getOutput(0)) == expected);
        }
        SECTION("Axis 1") {
            // Expected: one-hot encoding of the argmax along axis 1.
            Tensor expected =
                Tensor(Array3D<float, 2, 3, 4>{{{{0.0, 0.0, 0.0, 1.0},
                                                 {1.0, 0.0, 1.0, 0.0},
                                                 {0.0, 1.0, 0.0, 0.0}},
                                                {{0.0, 0.0, 1.0, 0.0},
                                                 {0.0, 1.0, 0.0, 0.0},
                                                 {1.0, 0.0, 0.0, 1.0}}}});

            // Build the node, wire the input, and run on the CPU backend.
            std::shared_ptr<Node> hardmaxNode = Hardmax(1);
            auto op = std::static_pointer_cast<OperatorTensor>(
                hardmaxNode->getOperator());
            op->associateInput(0, input);
            op->setDataType(DataType::Float32);
            op->setBackend("cpu");
            hardmaxNode->forward();

            REQUIRE(*(op->getOutput(0)) == expected);
        }
    }
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment