From 0c96654af57e2fe12031647ab9ca4794aa1d7358 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Wed, 7 Feb 2024 14:56:28 +0100
Subject: [PATCH] Added Tanh and Sigmoid operators

---
 include/aidge/backend/cpu.hpp                 |  2 +
 .../cpu/operator/ReLUImpl_forward_kernels.hpp |  1 +
 .../backend/cpu/operator/SigmoidImpl.hpp      | 51 +++++++++++++++++++
 .../operator/SigmoidImpl_forward_kernels.hpp  | 43 ++++++++++++++++
 .../aidge/backend/cpu/operator/TanhImpl.hpp   | 51 +++++++++++++++++++
 .../cpu/operator/TanhImpl_forward_kernels.hpp | 43 ++++++++++++++++
 src/operator/SigmoidImpl.cpp                  | 42 +++++++++++++++
 src/operator/TanhImpl.cpp                     | 42 +++++++++++++++
 unit_tests/scheduler/Test_Scheduler.cpp       |  6 +--
 9 files changed, 276 insertions(+), 5 deletions(-)
 create mode 100644 include/aidge/backend/cpu/operator/SigmoidImpl.hpp
 create mode 100644 include/aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp
 create mode 100644 include/aidge/backend/cpu/operator/TanhImpl.hpp
 create mode 100644 include/aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp
 create mode 100644 src/operator/SigmoidImpl.cpp
 create mode 100644 src/operator/TanhImpl.cpp

diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp
index a0d232f6..fdb1c93b 100644
--- a/include/aidge/backend/cpu.hpp
+++ b/include/aidge/backend/cpu.hpp
@@ -31,9 +31,11 @@
 #include "aidge/backend/cpu/operator/ProducerImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/ScalingImpl.hpp"
+#include "aidge/backend/cpu/operator/SigmoidImpl.hpp"
 #include "aidge/backend/cpu/operator/SliceImpl.hpp"
 #include "aidge/backend/cpu/operator/SqrtImpl.hpp"
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
+#include "aidge/backend/cpu/operator/TanhImpl.hpp"
 
 #endif /* AIDGE_CPU_IMPORTS_H_ */
\ No newline at end of file
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp
index 955099a6..90b22c5f 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp
@@ -25,6 +25,7 @@ void ReLUImpl_cpu_forward_kernel(std::size_t inputLenght,
     const I* input = static_cast<const I*>(input_);
     O* output = static_cast<O*>(output_);
 
+#pragma omp parallel for if (inputLenght > 1024)
     for (std::size_t i = 0; i < inputLenght; ++i) {
         output[i] = input[i] > 0 ? input[i] : 0;
    }
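Note on the ReLU hunk above: the OpenMP `if` clause makes the parallelization conditional, so small tensors keep running sequentially instead of paying the thread-spawning overhead. A minimal standalone sketch of the same pattern, reusing the 1024 threshold from the patch (the example function and vector size are illustrative, not taken from Aidge):

#include <cstddef>
#include <vector>

// Only parallelize when the workload is large enough to amortize the cost
// of spawning threads; below the threshold the loop stays sequential.
// Without -fopenmp the pragma is simply ignored and the code still builds.
void relu_inplace(std::vector<float>& data) {
    const std::size_t n = data.size();
#pragma omp parallel for if (n > 1024)
    for (std::size_t i = 0; i < n; ++i) {
        data[i] = data[i] > 0.0f ? data[i] : 0.0f;
    }
}

int main() {
    std::vector<float> v(10000, -1.0f);
    relu_inplace(v);  // every element becomes 0
}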
diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
new file mode 100644
index 00000000..8678a5a5
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_SIGMOIDIMPL_H_
+#define AIDGE_CPU_OPERATOR_SIGMOIDIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Sigmoid.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// class Sigmoid_Op;
+
+// compute kernel registry for forward and backward
+class SigmoidImplForward_cpu
+    : public Registrable<SigmoidImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+};
+class SigmoidImplBackward_cpu
+    : public Registrable<SigmoidImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+};
+
+class SigmoidImpl_cpu : public OperatorImpl {
+public:
+    SigmoidImpl_cpu(const Sigmoid_Op& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<SigmoidImpl_cpu> create(const Sigmoid_Op& op) {
+        return std::make_unique<SigmoidImpl_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+namespace {
+static Registrar<Sigmoid_Op> registrarSigmoidImpl_cpu("cpu", Aidge::SigmoidImpl_cpu::create);
+}
+} // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_SIGMOIDIMPL_H_ */
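The anonymous-namespace Registrar object above is what wires the CPU implementation into the framework: its constructor runs during static initialization, once the header is pulled in via cpu.hpp, and records the factory under the "cpu" key. A stripped-down sketch of that self-registration idiom (toy types only, not the real Aidge Registrar API):

#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <utility>

// Toy factory registry: backend name -> creator function.
std::map<std::string, std::function<void()>>& backends() {
    static std::map<std::string, std::function<void()>> m;
    return m;
}

struct Registrar {
    Registrar(const std::string& name, std::function<void()> create) {
        backends().emplace(name, std::move(create));  // runs before main()
    }
};

namespace {
// Counterpart of: static Registrar<Sigmoid_Op> registrarSigmoidImpl_cpu("cpu", SigmoidImpl_cpu::create);
static Registrar registerCpuImpl("cpu", [] { std::cout << "creating cpu impl\n"; });
}

int main() {
    backends().at("cpu")();  // roughly what selecting the "cpu" backend boils down to
}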
diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp
new file mode 100644
index 00000000..96303312
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp
@@ -0,0 +1,43 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_SIGMOIDIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_SIGMOIDIMPL_FORWARD_KERNEL_H_
+
+#include <cmath>
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/SigmoidImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void SigmoidImpl_cpu_forward_kernel(std::size_t inputLenght,
+                                    const void* input_,
+                                    void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
+#pragma omp parallel for if (inputLenght > 1024)
+    for (std::size_t i = 0; i < inputLenght; ++i) {
+        output[i] = static_cast<O>(1.0) / (static_cast<O>(1.0) + std::exp(-input[i]));
+    }
+}
+
+namespace {
+static Registrar<SigmoidImplForward_cpu> registrarSigmoidImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::SigmoidImpl_cpu_forward_kernel<float, float>);
+static Registrar<SigmoidImplForward_cpu> registrarSigmoidImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::SigmoidImpl_cpu_forward_kernel<double, double>);
+} // namespace
+} // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_SIGMOIDIMPL_FORWARD_KERNEL_H_ */
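The kernel above evaluates the logistic function 1 / (1 + exp(-x)) directly. For very negative inputs exp(-x) can overflow to infinity in the chosen precision; the division still yields 0, so the result stays finite, but a reference implementation often avoids the large exponential altogether. A small standalone reference version, illustrative only and not part of the patch, that can be used to sanity-check the kernel output:

#include <cmath>
#include <cstdio>

// Reference logistic function written so the exponential argument is never
// positive: for x < 0 it uses the algebraically equivalent exp(x) / (1 + exp(x)).
double sigmoid_ref(double x) {
    if (x >= 0.0) {
        return 1.0 / (1.0 + std::exp(-x));
    }
    const double e = std::exp(x);
    return e / (1.0 + e);
}

int main() {
    std::printf("%f %f %f\n", sigmoid_ref(-2.0), sigmoid_ref(0.0), sigmoid_ref(2.0));
    // Expected: ~0.119203 0.500000 ~0.880797
}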
diff --git a/include/aidge/backend/cpu/operator/TanhImpl.hpp b/include/aidge/backend/cpu/operator/TanhImpl.hpp
new file mode 100644
index 00000000..3e88a3d0
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/TanhImpl.hpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_TANHIMPL_H_
+#define AIDGE_CPU_OPERATOR_TANHIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Tanh.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// class Tanh_Op;
+
+// compute kernel registry for forward and backward
+class TanhImplForward_cpu
+    : public Registrable<TanhImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+};
+class TanhImplBackward_cpu
+    : public Registrable<TanhImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+};
+
+class TanhImpl_cpu : public OperatorImpl {
+public:
+    TanhImpl_cpu(const Tanh_Op& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<TanhImpl_cpu> create(const Tanh_Op& op) {
+        return std::make_unique<TanhImpl_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+namespace {
+static Registrar<Tanh_Op> registrarTanhImpl_cpu("cpu", Aidge::TanhImpl_cpu::create);
+}
+} // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_TANHIMPL_H_ */
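TanhImpl.hpp mirrors SigmoidImpl.hpp line for line, which is fitting since the two functions are closely related: tanh(x) = 2 * sigmoid(2x) - 1. A quick standalone numerical check of that identity (illustrative only, independent of the patch):

#include <cassert>
#include <cmath>

int main() {
    // Verify tanh(x) == 2*sigmoid(2x) - 1 over a small range of doubles.
    for (double x = -4.0; x <= 4.0; x += 0.5) {
        const double viaSigmoid = 2.0 / (1.0 + std::exp(-2.0 * x)) - 1.0;
        assert(std::fabs(std::tanh(x) - viaSigmoid) < 1e-12);
    }
    return 0;
}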
diff --git a/include/aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp
new file mode 100644
index 00000000..3012aae9
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp
@@ -0,0 +1,43 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_TANHIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_TANHIMPL_FORWARD_KERNEL_H_
+
+#include <cmath>
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/TanhImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void TanhImpl_cpu_forward_kernel(std::size_t inputLenght,
+                                 const void* input_,
+                                 void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
+#pragma omp parallel for if (inputLenght > 1024)
+    for (std::size_t i = 0; i < inputLenght; ++i) {
+        output[i] = std::tanh(input[i]);
+    }
+}
+
+namespace {
+static Registrar<TanhImplForward_cpu> registrarTanhImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::TanhImpl_cpu_forward_kernel<float, float>);
+static Registrar<TanhImplForward_cpu> registrarTanhImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::TanhImpl_cpu_forward_kernel<double, double>);
+} // namespace
+} // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_TANHIMPL_FORWARD_KERNEL_H_ */
diff --git a/src/operator/SigmoidImpl.cpp b/src/operator/SigmoidImpl.cpp
new file mode 100644
index 00000000..7322e08b
--- /dev/null
+++ b/src/operator/SigmoidImpl.cpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <chrono>  // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread>  // std::this_thread::sleep_for
+#include <vector>
+
+#include "aidge/operator/Sigmoid.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+
+#include "aidge/backend/cpu/operator/SigmoidImpl.hpp"
+#include "aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp"
+
+Aidge::NbElts_t Aidge::SigmoidImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+
+void Aidge::SigmoidImpl_cpu::forward() {
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<SigmoidImplForward_cpu>::create({
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawOutput(0)));
+}
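getNbRequiredProtected() returning 0 is annotated in the file itself as "this implementation can be in-place": the operator never needs to re-read input data it has already consumed, so the scheduler may let the output buffer overlap the input buffer. The sketch below, plain C++ and not Aidge code, illustrates why an element-wise kernel tolerates aliased input and output:

#include <cmath>
#include <cstdio>

int main() {
    // An element-wise kernel reads element i strictly before writing element i,
    // so running it with the output aliasing the input is well defined.
    float data[4] = {-2.0f, -1.0f, 1.0f, 2.0f};
    for (int i = 0; i < 4; ++i) {
        data[i] = 1.0f / (1.0f + std::exp(-data[i]));  // in-place sigmoid
    }
    std::printf("%f %f %f %f\n", data[0], data[1], data[2], data[3]);
}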
diff --git a/src/operator/TanhImpl.cpp b/src/operator/TanhImpl.cpp
new file mode 100644
index 00000000..c4658440
--- /dev/null
+++ b/src/operator/TanhImpl.cpp
@@ -0,0 +1,42 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <chrono>  // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread>  // std::this_thread::sleep_for
+#include <vector>
+
+#include "aidge/operator/Tanh.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+
+#include "aidge/backend/cpu/operator/TanhImpl.hpp"
+#include "aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp"
+
+Aidge::NbElts_t Aidge::TanhImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+
+void Aidge::TanhImpl_cpu::forward() {
+    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<TanhImplForward_cpu>::create({
+        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
+        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+
+    // Call kernel
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawOutput(0)));
+}
diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp
index 1ae235f9..2440fa82 100644
--- a/unit_tests/scheduler/Test_Scheduler.cpp
+++ b/unit_tests/scheduler/Test_Scheduler.cpp
@@ -225,11 +225,7 @@ TEST_CASE("[cpu/scheduler] SequentialScheduler(forward)") {
         bias->addChild(add2, 0, 1);
         add1->getOperator()->setInput(0, in);
         // Update GraphView inputs/outputs following previous connections:
-        g->add(mem);
-        g->add(add1);
-        g->add(add2);
-        g->add(init);
-        g->add(bias);
+        g->add({mem, add1, add2, init, bias});
         g->setBackend("cpu");
         g->setDataType(Aidge::DataType::Int32);
 
--
GitLab
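The Test_Scheduler change is purely cosmetic: g->add({mem, add1, add2, init, bias}) relies on an overload of GraphView::add that accepts several nodes at once, so the five separate calls collapse into one. A rough sketch of that calling convention, using hypothetical Node/GraphView stand-ins rather than the real Aidge classes:

#include <initializer_list>
#include <memory>
#include <set>

// Hypothetical stand-ins, only to illustrate the overload the test now uses.
struct Node {};

struct GraphView {
    std::set<std::shared_ptr<Node>> nodes;
    void add(const std::shared_ptr<Node>& n) { nodes.insert(n); }
    void add(std::initializer_list<std::shared_ptr<Node>> ns) {
        for (const auto& n : ns) add(n);  // same effect as repeated single add() calls
    }
};

int main() {
    auto a = std::make_shared<Node>();
    auto b = std::make_shared<Node>();
    GraphView g;
    g.add({a, b});  // equivalent to g.add(a); g.add(b);
}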