diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp
index 963895c1adbff22a770851bbc5ba305007415f1e..712d520377d8d71eb5f371a13e10712bae846589 100644
--- a/include/aidge/backend/cpu.hpp
+++ b/include/aidge/backend/cpu.hpp
@@ -15,6 +15,7 @@
 #include "aidge/backend/cpu/operator/AbsImpl.hpp"
 #include "aidge/backend/cpu/operator/AddImpl.hpp"
 #include "aidge/backend/cpu/operator/AndImpl.hpp"
 #include "aidge/backend/cpu/operator/ArgMaxImpl.hpp"
+#include "aidge/backend/cpu/operator/AtanImpl.hpp"
 #include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp"
 #include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp"
diff --git a/include/aidge/backend/cpu/operator/AtanImpl.hpp b/include/aidge/backend/cpu/operator/AtanImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..2f1b4bf0ad666ff9856c24fa675b70d6f830b07c
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/AtanImpl.hpp
@@ -0,0 +1,33 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_ATAN_H_
+#define AIDGE_CPU_OPERATOR_ATAN_H_
+
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
+#include "aidge/operator/Atan.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// Operator implementation entry point for the backend
+using AtanImpl_cpu = OperatorImpl_cpu<Atan_Op,
+    void(const std::size_t, const void*, void*),
+    void(const std::size_t, const void*, const void*, void*)>;
+
+// Implementation entry point registration to Operator
+REGISTRAR(Atan_Op, "cpu", Aidge::AtanImpl_cpu::create);
+} // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_ATAN_H_ */
diff --git a/include/aidge/backend/cpu/operator/AtanImpl_kernels.hpp b/include/aidge/backend/cpu/operator/AtanImpl_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..2a786339503354514416705b61cfedfcc0b7c321
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/AtanImpl_kernels.hpp
@@ -0,0 +1,60 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_ATANIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_ATANIMPL_KERNELS_H_
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/AtanImpl.hpp"
+#include <cmath>  // For std::atan()
+
+
+namespace Aidge {
+template <class I, class O>
+void AtanImpl_cpu_forward_kernel(std::size_t inputLength,
+                                 const void* input_,
+                                 void* output_) {
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
+    for (size_t i = 0; i < inputLength; ++i) {
+        output[i] = static_cast<O>(std::atan(input[i]));
+    }
+
+}
+
+template <class I, class GI, class GO>
+void AtanImpl_cpu_backward_kernel(const std::size_t inputLength,
+                                  const void* input_, const void* grad_output_,
+                                  void* grad_input_) {
+    const I* input = static_cast<const I*>(input_);
+    const GO* grad_output = static_cast<const GO*>(grad_output_);
+    GI* grad_input = static_cast<GI*>(grad_input_);
+
+    // Apply the derivative of atan for each element in the input array
+    for (size_t i = 0; i < inputLength; ++i) {
+        // dL/dx = dL/dy * (1 / (1 + x^2)), since d(atan(x))/dx = 1 / (1 + x^2)
+        grad_input[i] = static_cast<GI>(grad_output[i] * static_cast<GO>(1.0 / (1.0 + input[i] * input[i])));
+    }
+}
+
+
+// Kernels registration to implementation entry point
+REGISTRAR(AtanImpl_cpu,
+    {DataType::Float32},
+    {ProdConso::inPlaceModel, Aidge::AtanImpl_cpu_forward_kernel<float, float>, Aidge::AtanImpl_cpu_backward_kernel<float, float, float>});
+REGISTRAR(AtanImpl_cpu,
+    {DataType::Float64},
+    {ProdConso::inPlaceModel, Aidge::AtanImpl_cpu_forward_kernel<double, double>, Aidge::AtanImpl_cpu_backward_kernel<double, double, double>});
+} // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_ATANIMPL_KERNELS_H_ */
diff --git a/src/operator/AtanImpl.cpp b/src/operator/AtanImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..af3393e7eb13fad4b414172edc7d1ab32ffcc573
--- /dev/null
+++ b/src/operator/AtanImpl.cpp
@@ -0,0 +1,54 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <chrono>  // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread>  // std::this_thread::sleep_for
+#include <vector>
+
+#include "aidge/operator/Atan.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+
+#include "aidge/backend/cpu/operator/AtanImpl.hpp"
+#include "aidge/backend/cpu/operator/AtanImpl_kernels.hpp"
+
+template <>
+void Aidge::AtanImpl_cpu::forward() {
+    const Atan_Op& op_ = dynamic_cast<const Atan_Op&>(mOp);
+    std::shared_ptr<Tensor> in0 = op_.getInput(0);
+    std::shared_ptr<Tensor> out0 = op_.getOutput(0);
+    AIDGE_ASSERT(in0, "missing input #0");
+
+    // Find the correct kernel type
+    const auto impl = Registrar<AtanImpl_cpu>::create(getBestMatch(getRequiredSpec()));
+
+    // Call kernel
+    impl.forward(in0->size(),
+        getCPUPtr(mOp.getRawInput(0)),
+        getCPUPtr(mOp.getRawOutput(0)));
+}
+
+template <>
+void Aidge::AtanImpl_cpu::backward() {
+    const Atan_Op& op_ = dynamic_cast<const Atan_Op&>(mOp);
+    std::shared_ptr<Tensor> in0 = op_.getInput(0);
+    std::shared_ptr<Tensor> gra_int0 = op_.getInput(0)->grad();
+    std::shared_ptr<Tensor> gra_out0 = op_.getOutput(0)->grad();
+    AIDGE_ASSERT(in0, "missing input #0 for current {} operator", op_.type());
+
+    // Find the correct kernel type
+    const auto impl = Registrar<AtanImpl_cpu>::create(getBestMatch(getRequiredSpec()));
+
+    // Call kernel (the backward kernel needs the forward input to compute 1 / (1 + x^2))
+    impl.backward(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
+}
diff --git a/unit_tests/operator/Test_Atan.cpp b/unit_tests/operator/Test_Atan.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9548e35d81b0423125424a4198d82558c4e57df4
--- /dev/null
+++ b/unit_tests/operator/Test_Atan.cpp
@@ -0,0 +1,78 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Atan.hpp"
+
+#include "aidge/backend/cpu.hpp"
+
+#include <cmath>  // std::abs
+#include <memory>
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Atan(forward)") {
+  SECTION("1D Tensor") {
+    std::shared_ptr<Tensor> input0 =
+        std::make_shared<Tensor>(Array1D<float, 10>{
+            {0.41384590, 0.43120754, 0.93762982, 0.31049860, 0.77547199,
+             0.09514862, 0.16145366, 0.42776686, 0.43487436, 0.41170865}});
+    std::shared_ptr<Tensor> expectedOutput =
+        std::make_shared<Tensor>(Array1D<float, 10>{
+            {0.39238522, 0.40711672, 0.75322037, 0.30106049, 0.65960488,
+             0.09486303, 0.16007232, 0.40421187, 0.4102045, 0.39055911}});
+
+    std::shared_ptr<Node> myAtan = Atan();
+    auto op = std::static_pointer_cast<OperatorTensor>(myAtan->getOperator());
+    op->associateInput(0, input0);
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+    myAtan->forward();
+
+    float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
+    float* expectedPtr =
+        static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+    for (std::size_t i = 0; i < expectedOutput->size(); ++i) {
+      REQUIRE(std::abs(resPtr[i] - expectedPtr[i]) < 0.00001);
+    }
+  }
+
+  SECTION("3D Tensor") {
+    std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(
+        Array3D<float, 2, 2, 3>{{{
+                                     {0.97037154, 0.86208081, 0.77767169},
+                                     {0.38160080, 0.11422747, 0.77284443},
+                                 },
+                                 {{0.51592529, 0.72543722, 0.54641193},
+                                  {0.93866944, 0.97767913, 0.34172094}}}});
+    std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(
+        Array3D<float, 2, 2, 3>{{{{0.77036231, 0.71146592, 0.66097706},
+                                  {0.36454508, 0.11373451, 0.65796196}},
+                                 {{0.47630652, 0.62759472, 0.50008428},
+                                  {0.75377332, 0.77411225, 0.32928031}}}});
+
+    std::shared_ptr<Node> myAtan = Atan();
+    auto op = std::static_pointer_cast<OperatorTensor>(myAtan->getOperator());
+    op->associateInput(0, input0);
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+    myAtan->forward();
+
+    float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
+    float* expectedPtr =
+        static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+    for (std::size_t i = 0; i < expectedOutput->size(); ++i) {
+      REQUIRE(std::abs(resPtr[i] - expectedPtr[i]) < 0.00001);
+    }
+  }
+}
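Note: Test_Atan.cpp above only exercises the forward path against hard-coded expected values. A possible additional SECTION for the same TEST_CASE is sketched below; it is not part of the patch, and it assumes only the Aidge and Catch2 APIs already used in the test above (Atan(), associateInput, setDataType, setBackend, getImpl()->rawPtr()) plus the <cmath> include, checking the CPU output element-wise against std::atan computed on the host.

  SECTION("Consistency with std::atan") {
    // Hypothetical extra check (sketch): run the CPU kernel on a few values,
    // including negative ones, and compare with std::atan on the host.
    std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(
        Array1D<float, 5>{{-1.5, -0.25, 0.0, 0.5, 2.0}});

    std::shared_ptr<Node> myAtan = Atan();
    auto op = std::static_pointer_cast<OperatorTensor>(myAtan->getOperator());
    op->associateInput(0, input0);
    op->setDataType(DataType::Float32);
    op->setBackend("cpu");
    myAtan->forward();

    const float* inPtr = static_cast<float*>(input0->getImpl()->rawPtr());
    const float* outPtr =
        static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
    for (std::size_t i = 0; i < input0->size(); ++i) {
      REQUIRE(std::abs(outPtr[i] - std::atan(inPtr[i])) < 0.00001);
    }
  }

A similar section could validate the new backward kernel against a finite-difference estimate of d(atan(x))/dx = 1 / (1 + x^2), using the grad() accessors already referenced in AtanImpl.cpp.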