Skip to content
Snippets Groups Projects
Commit 684f7bc6 authored by Cyril Moineau's avatar Cyril Moineau
Browse files

Add AtanImpl for forward & backward, add unit test for forward.

parent 8ad1d47c
No related branches found
No related tags found
3 merge requests!118v0.4.0,!108v0.4.0,!92Export refactor
Pipeline #55421 failed
...
@@ -15,6 +15,8 @@
#include "aidge/backend/cpu/operator/AbsImpl.hpp"
#include "aidge/backend/cpu/operator/AddImpl.hpp"
#include "aidge/backend/cpu/operator/AndImpl.hpp"
#include "aidge/backend/cpu/operator/AtanImpl.hpp"
#include "aidge/backend/cpu/operator/ArgMaxImpl.hpp"
#include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp"
#include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp"
...
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_ATAN_H_
#define AIDGE_CPU_OPERATOR_ATAN_H_
#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
#include "aidge/operator/Atan.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include <memory>
#include <vector>
namespace Aidge {
// Operator implementation entry point for the backend.
// Binds Atan_Op to its CPU kernels via the two kernel signatures:
//  - forward:  void(elemCount, input, output)
//  - backward: void(elemCount, forwardOutput, gradOutput, gradInput)
// The matching kernels are registered in AtanImpl_kernels.hpp.
using AtanImpl_cpu = OperatorImpl_cpu<Atan_Op,
void(const std::size_t, const void*, void*),
void(const std::size_t, const void*, const void*, void*)>;
// Implementation entry point registration to Operator:
// makes the "cpu" backend selectable for Atan_Op.
REGISTRAR(Atan_Op, "cpu", Aidge::AtanImpl_cpu::create);
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_ATAN_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_ATANIMPL_KERNELS_H_
#define AIDGE_CPU_OPERATOR_ATANIMPL_KERNELS_H_
#include "aidge/utils/Registrar.hpp"
#include "aidge/backend/cpu/operator/AtanImpl.hpp"
#include <cmath> // For atan()
namespace Aidge {
/// Forward kernel: element-wise arc-tangent, output[i] = atan(input[i]).
///
/// @tparam I scalar type of the input buffer.
/// @tparam O scalar type of the output buffer.
/// @param inputLength number of elements in the flattened input tensor.
/// @param input_  type-erased pointer to inputLength elements of type I.
/// @param output_ type-erased pointer to inputLength elements of type O (written).
template <class I, class O>
void AtanImpl_cpu_forward_kernel(std::size_t inputLength,
                                 const void* input_,
                                 void* output_) {
    const I* input = static_cast<const I*>(input_);
    O* output = static_cast<O*>(output_);

    for (std::size_t i = 0; i < inputLength; ++i) {
        // std::atan selects the overload matching I (float vs double),
        // avoiding the silent float->double promotion of C's ::atan().
        output[i] = static_cast<O>(std::atan(input[i]));
    }
}
/// Backward kernel: gradient of the element-wise arc-tangent.
///
/// The analytical derivative is d(atan(x))/dx = 1 / (1 + x^2), but this
/// kernel only receives the forward OUTPUT y = atan(x), not the input x.
/// Substituting x = tan(y) gives 1 / (1 + tan(y)^2) = cos(y)^2, which is
/// what is computed here. (The previous code applied 1 / (1 + y^2) to the
/// output, i.e. it used the output where the input was required.)
///
/// @tparam O  scalar type of the forward output buffer.
/// @tparam GI scalar type of the input-gradient buffer.
/// @tparam GO scalar type of the output-gradient buffer.
/// @param inputLength  number of elements in the flattened tensors.
/// @param output_      forward output y = atan(x).
/// @param grad_output_ incoming gradient dL/dy.
/// @param grad_input_  outgoing gradient dL/dx (written).
template <class O, class GI, class GO>
void AtanImpl_cpu_backward_kernel(const std::size_t inputLength,
                                  const void* output_, const void* grad_output_,
                                  void* grad_input_) {
    const O* output = static_cast<const O*>(output_);
    const GO* grad_output = static_cast<const GO*>(grad_output_);
    GI* grad_input = static_cast<GI*>(grad_input_);

    for (std::size_t i = 0; i < inputLength; ++i) {
        // dL/dx = dL/dy * cos(y)^2   (== dL/dy * 1/(1 + x^2))
        const O c = static_cast<O>(std::cos(output[i]));
        grad_input[i] = static_cast<GI>(grad_output[i] * c * c);
    }
}
// Kernels registration to implementation entry point.
// Each entry maps a tensor DataType to the {prod/conso model, forward
// kernel, backward kernel} triplet used by AtanImpl_cpu. Only Float32 and
// Float64 are supported; ProdConso::inPlaceModel presumably allows the
// output to alias the input buffer (element-wise op) — name-based, confirm.
REGISTRAR(AtanImpl_cpu,
{DataType::Float32},
{ProdConso::inPlaceModel, Aidge::AtanImpl_cpu_forward_kernel<float, float>, Aidge::AtanImpl_cpu_backward_kernel<float, float, float>});
REGISTRAR(AtanImpl_cpu,
{DataType::Float64},
{ProdConso::inPlaceModel, Aidge::AtanImpl_cpu_forward_kernel<double, double>, Aidge::AtanImpl_cpu_backward_kernel<double, double, double>});
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_ATANIMPL_KERNELS_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <chrono> // std::chrono::milliseconds
#include <numeric> // std::accumulate
#include <thread> // std::this_thread::sleep_for
#include <vector>
#include "aidge/operator/Atan.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/backend/cpu/operator/AtanImpl.hpp"
#include "aidge/backend/cpu/operator/AtanImpl_kernels.hpp"
/// Forward pass of the CPU Atan implementation: out0 = atan(in0), element-wise.
/// Dispatches to the registered kernel best matching the tensors' data types.
template <>
void Aidge::AtanImpl_cpu::forward() {
    const Atan_Op& op_ = dynamic_cast<const Atan_Op&>(mOp);
    std::shared_ptr<Tensor> in0 = op_.getInput(0);
    std::shared_ptr<Tensor> out0 = op_.getOutput(0);
    AIDGE_ASSERT(in0, "missing input #0");

    // Find the correct kernel type for the current input/output specs.
    const auto impl = Registrar<AtanImpl_cpu>::create(getBestMatch(getRequiredSpec()));

    // Call kernel on the raw CPU buffers. (Previously out0 was fetched but
    // unused and the buffers re-fetched through mOp.getRawInput/Output.)
    impl.forward(in0->size(),
                 getCPUPtr(in0),
                 getCPUPtr(out0));
}
/// Backward pass of the CPU Atan implementation: propagates the output
/// gradient to the input gradient through the registered backward kernel.
template <>
void Aidge::AtanImpl_cpu::backward() {
    const Atan_Op& op_ = dynamic_cast<const Atan_Op&>(mOp);
    std::shared_ptr<Tensor> out0 = op_.getOutput(0);
    // Check out0 BEFORE dereferencing it to fetch its gradient (the
    // previous code called getOutput(0)->grad() ahead of this assert).
    AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type());
    std::shared_ptr<Tensor> gra_int0 = op_.getInput(0)->grad();
    std::shared_ptr<Tensor> gra_out0 = out0->grad();
    AIDGE_ASSERT(gra_int0, "missing gradient of input #0 for current {} operator", op_.type());
    AIDGE_ASSERT(gra_out0, "missing gradient of output #0 for current {} operator", op_.type());

    // Find the correct kernel type for the current input/output specs.
    const auto impl = Registrar<AtanImpl_cpu>::create(getBestMatch(getRequiredSpec()));

    // Call kernel: the backward kernel receives the forward output and the
    // output gradient, and writes the input gradient.
    impl.backward(gra_int0->size(), getCPUPtr(out0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
}
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Atan.hpp"
#include "aidge/backend/cpu.hpp"
#include <memory>
using namespace Aidge;
// Unit tests for the CPU forward implementation of the Atan operator.
TEST_CASE("[cpu/operator] Atan(forward)") {
  // Builds an Atan node, runs a forward pass on `input`, and checks every
  // output element against `expected` within an absolute tolerance.
  auto runAndCheck = [](const std::shared_ptr<Tensor>& input,
                        const std::shared_ptr<Tensor>& expected) {
    std::shared_ptr<Node> atanNode = Atan();
    auto opTensor =
        std::static_pointer_cast<OperatorTensor>(atanNode->getOperator());
    opTensor->associateInput(0, input);
    opTensor->setDataType(DataType::Float32);
    opTensor->setBackend("cpu");
    atanNode->forward();

    const float* got =
        static_cast<float*>(opTensor->getOutput(0)->getImpl()->rawPtr());
    const float* want = static_cast<float*>(expected->getImpl()->rawPtr());
    for (std::size_t idx = 0; idx < expected->size(); ++idx) {
      REQUIRE(std::abs(got[idx] - want[idx]) < 0.00001);
    }
  };

  SECTION("1D Tensor") {
    std::shared_ptr<Tensor> input0 =
        std::make_shared<Tensor>(Array1D<float, 10>{
            {0.41384590, 0.43120754, 0.93762982, 0.31049860, 0.77547199,
             0.09514862, 0.16145366, 0.42776686, 0.43487436, 0.41170865}});
    std::shared_ptr<Tensor> expectedOutput =
        std::make_shared<Tensor>(Array1D<float, 10>{
            {0.39238522, 0.40711672, 0.75322037, 0.30106049, 0.65960488,
             0.09486303, 0.16007232, 0.40421187, 0.4102045, 0.39055911}});
    runAndCheck(input0, expectedOutput);
  }

  SECTION("3D Tensor") {
    std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(
        Array3D<float, 2, 2, 3>{{{
                                     {0.97037154, 0.86208081, 0.77767169},
                                     {0.38160080, 0.11422747, 0.77284443},
                                 },
                                 {{0.51592529, 0.72543722, 0.54641193},
                                  {0.93866944, 0.97767913, 0.34172094}}}});
    std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(
        Array3D<float, 2, 2, 3>{{{{0.77036231, 0.71146592, 0.66097706},
                                  {0.36454508, 0.11373451, 0.65796196}},
                                 {{0.47630652, 0.62759472, 0.50008428},
                                  {0.75377332, 0.77411225, 0.32928031}}}});
    runAndCheck(input0, expectedOutput);
  }
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment