From 57cd0a7754142e3993414c2509dc398a9b4ee9ea Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Tue, 19 Nov 2024 15:26:24 +0100
Subject: [PATCH] Add LRN operator

---
 include/aidge/backend/cpu.hpp                  |  1 +
 .../aidge/backend/cpu/operator/LRNImpl.hpp     | 32 +++++++++
 .../backend/cpu/operator/LRNImpl_kernels.hpp   | 71 ++++++++++++++++++++
 src/operator/LRNImpl.cpp                       | 46 ++++++++++++++
 4 files changed, 150 insertions(+)
 create mode 100644 include/aidge/backend/cpu/operator/LRNImpl.hpp
 create mode 100644 include/aidge/backend/cpu/operator/LRNImpl_kernels.hpp
 create mode 100644 src/operator/LRNImpl.cpp

diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp
index 0faca965..37a781c6 100644
--- a/include/aidge/backend/cpu.hpp
+++ b/include/aidge/backend/cpu.hpp
@@ -31,6 +31,7 @@
 #include "aidge/backend/cpu/operator/FCImpl.hpp"
 #include "aidge/backend/cpu/operator/FoldImpl.hpp"
 #include "aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp"
+#include "aidge/backend/cpu/operator/LRNImpl.hpp"
 #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/LnImpl.hpp"
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
diff --git a/include/aidge/backend/cpu/operator/LRNImpl.hpp b/include/aidge/backend/cpu/operator/LRNImpl.hpp
new file mode 100644
index 00000000..81956c87
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/LRNImpl.hpp
@@ -0,0 +1,32 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_LRNIMPL_H_
+#define AIDGE_CPU_OPERATOR_LRNIMPL_H_
+
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
+#include "aidge/operator/LRN.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// Operator implementation entry point for the backend
+using LRNImpl_cpu = OperatorImpl_cpu<LRN_Op,
+    void(float, float, float, std::size_t, const std::vector<DimSize_t>&, const void*, void*)>;
+
+// Implementation entry point registration to Operator
+REGISTRAR(LRN_Op, "cpu", Aidge::LRNImpl_cpu::create);
+} // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_LRNIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/LRNImpl_kernels.hpp b/include/aidge/backend/cpu/operator/LRNImpl_kernels.hpp
new file mode 100644
index 00000000..02018c9f
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/LRNImpl_kernels.hpp
@@ -0,0 +1,71 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_LRNIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_LRNIMPL_KERNELS_H_
+
+#include "aidge/utils/Registrar.hpp"
+#include <cstddef>
+#include <cmath>
+#include <functional>  // std::multiplies
+#include <numeric>     // std::accumulate
+#include "aidge/data/Data.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+
+#include "aidge/backend/cpu/operator/LRNImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void LRNImpl_cpu_forward_kernel(float alpha, float beta, float bias, std::size_t size, const std::vector<DimSize_t>& inputDims, const void* input_, void* output_)
+{
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
+    const DimSize_t nbBatch = inputDims[0];
+    const DimSize_t nbChannels = (inputDims.size() > 1) ? inputDims[1] : 1;
+    const DimSize_t featureMapSize = (inputDims.size() > 2) ? std::accumulate(inputDims.begin() + 2, inputDims.end(), 1, std::multiplies<DimSize_t>()) : 1;
+
+    for (std::size_t batch = 0; batch < nbBatch; ++batch) {
+        for (std::size_t ch = 0; ch < nbChannels; ++ch) {
+            const std::size_t ioIndex = (ch + batch*nbChannels) * featureMapSize;
+            const unsigned int channelMin
+                = std::max<int>(0, static_cast<int>(ch) - static_cast<int>(size / 2));
+            const unsigned int channelMax
+                = std::min<size_t>(nbChannels - 1, ch + size / 2);
+
+            for (std::size_t feature = 0; feature < featureMapSize; ++feature) {
+                // Accumulate the squared values across the neighboring channels
+                O accAcrossChannels(0.0);
+
+                for (unsigned int accChannel = channelMin;
+                    accChannel <= channelMax; ++accChannel)
+                {
+                    const I value = input[(accChannel + batch*nbChannels) * featureMapSize + feature];
+                    accAcrossChannels += value * value;
+                }
+                // Compute the output signal
+                output[ioIndex + feature] = input[ioIndex + feature]
+                    / std::pow((bias + accAcrossChannels * alpha), beta);
+            }
+        }
+    }
+}
+
+REGISTRAR(LRNImpl_cpu,
+    {DataType::Float32},
+    {ProdConso::inPlaceModel, Aidge::LRNImpl_cpu_forward_kernel<float, float>, nullptr});
+REGISTRAR(LRNImpl_cpu,
+    {DataType::Float64},
+    {ProdConso::inPlaceModel, Aidge::LRNImpl_cpu_forward_kernel<double, double>, nullptr});
+} // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_LRNIMPL_KERNELS_H_ */
diff --git a/src/operator/LRNImpl.cpp b/src/operator/LRNImpl.cpp
new file mode 100644
index 00000000..b914ffac
--- /dev/null
+++ b/src/operator/LRNImpl.cpp
@@ -0,0 +1,46 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <chrono>  // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread>  // std::this_thread::sleep_for
+#include <vector>
+
+#include "aidge/operator/LRN.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+
+#include "aidge/backend/cpu/operator/LRNImpl.hpp"
+#include "aidge/backend/cpu/operator/LRNImpl_kernels.hpp"
+
+template <>
+void Aidge::LRNImpl_cpu::forward() {
+    const auto& op_ = dynamic_cast<const LRN_Op&>(mOp);
+    AIDGE_ASSERT(!op_.getInput(0)->empty(), "LRN input empty");
+
+    // Find the correct kernel type
+    const auto impl = Registrar<LRNImpl_cpu>::create(getBestMatch(getRequiredSpec()));
+
+    // Call kernel
+    impl.forward(op_.alpha(),
+               op_.beta(),
+               op_.bias(),
+               op_.size(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
+               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
+}
+
+template <>
+void Aidge::LRNImpl_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for LRN_Op on backend cpu");
+}
--
GitLab
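
For reference, below is a minimal, self-contained sketch of the textbook cross-channel LRN (the AlexNet/ONNX formulation), which can be used to sanity-check the forward kernel above on a small NCHW buffer. It is not aidge code: the helper name lrnReference is made up for illustration, and it assumes the ONNX convention in which alpha is divided by the window size before scaling the sum of squares, whereas the kernel in this patch applies alpha directly; which convention LRN_Op expects is not visible from this diff.

// Standalone reference LRN on an NCHW buffer (sketch, not aidge code).
// Assumption: ONNX-style y = x / (bias + (alpha / size) * sum_j x_j^2)^beta,
// with j running over channels [c - size/2, c + size/2] clamped to [0, C-1].
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

std::vector<float> lrnReference(const std::vector<float>& x,
                                std::size_t N, std::size_t C, std::size_t HW,
                                float alpha, float beta, float bias, std::size_t size) {
    std::vector<float> y(x.size());
    const std::size_t half = size / 2;
    for (std::size_t n = 0; n < N; ++n) {
        for (std::size_t c = 0; c < C; ++c) {
            const std::size_t cMin = (c >= half) ? (c - half) : 0;
            const std::size_t cMax = std::min(C - 1, c + half);
            for (std::size_t i = 0; i < HW; ++i) {
                // Sum of squares over the channel neighborhood at this spatial position
                float sumSq = 0.0f;
                for (std::size_t j = cMin; j <= cMax; ++j) {
                    const float v = x[(n * C + j) * HW + i];
                    sumSq += v * v;
                }
                const float denom = std::pow(bias + (alpha / static_cast<float>(size)) * sumSq, beta);
                y[(n * C + c) * HW + i] = x[(n * C + c) * HW + i] / denom;
            }
        }
    }
    return y;
}

int main() {
    // 1 batch, 3 channels, 2x2 feature map, values 1..12 (hypothetical test data)
    std::vector<float> x(12);
    for (std::size_t i = 0; i < x.size(); ++i) { x[i] = static_cast<float>(i + 1); }
    const auto y = lrnReference(x, 1, 3, 4, /*alpha=*/1e-4f, /*beta=*/0.75f, /*bias=*/1.0f, /*size=*/3);
    for (float v : y) { std::cout << v << ' '; }
    std::cout << '\n';
    return 0;
}

Feeding the same buffer through the patched kernel (with alpha pre-divided by size if the ONNX convention is intended) should reproduce these values.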