Commit 889f64fc authored by Houssem ROUIS

add pow operator

parent 764074f5
1 merge request: !19 Binary operators
@@ -23,6 +23,7 @@
 #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
+#include "aidge/backend/cpu/operator/PowImpl.hpp"
 #include "aidge/backend/cpu/operator/ProducerImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_POWIMPL_H_
#define AIDGE_CPU_OPERATOR_POWIMPL_H_

#include <memory>
#include <vector>

#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Pow.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"

namespace Aidge {
// class Pow_Op;

// Compute kernel registries for the forward and backward passes.
// Kernels are keyed on the (input #0, input #1, output) DataType triple.
class PowImplForward_cpu
    : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
};
class PowImplBackward_cpu
    : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
};
class PowImpl_cpu : public OperatorImpl {
public:
    PowImpl_cpu(const Pow_Op& op) : OperatorImpl(op) {}

    static std::unique_ptr<PowImpl_cpu> create(const Pow_Op& op) {
        return std::make_unique<PowImpl_cpu>(op);
    }

    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
    void forward() override;
};
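
// Register this CPU implementation so that a Pow_Op configured with the
// "cpu" backend can be instantiated through the registrar below.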
namespace {
static Registrar<Pow_Op> registrarPowImpl_cpu("cpu", Aidge::PowImpl_cpu::create);
}
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_POWIMPL_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_OPERATOR_POWIMPL_FORWARD_KERNEL_H_
#define AIDGE_CPU_OPERATOR_POWIMPL_FORWARD_KERNEL_H_

#include <cmath>

#include "aidge/utils/Registrar.hpp"
#include "aidge/backend/cpu/operator/PowImpl.hpp"
namespace Aidge {
template <class I1, class I2, class O>
void PowImpl_cpu_forward_kernel(std::size_t inputLength,
                                const void* input1_,
                                const void* input2_,
                                void* output_) {

    const I1* input_1 = static_cast<const I1*>(input1_);
    const I2* input_2 = static_cast<const I2*>(input2_);
    O* output = static_cast<O*>(output_);

    // TODO: handle the case where the two input tensors have the same size
    // (element-wise exponents); for now the exponent is read from the first
    // element of input #1, i.e. only a scalar exponent is supported.
    for (std::size_t i = 0; i < inputLength; ++i) {
        output[i] = std::pow(input_1[i], input_2[0]);
    }
}
namespace {
// TODO: add support for pow(float, int)
static Registrar<PowImplForward_cpu> registrarPowImplForward_cpu_Float32(
{DataType::Float32, DataType::Float32, DataType::Float32},
Aidge::PowImpl_cpu_forward_kernel<float, float, float>);
static Registrar<PowImplForward_cpu> registrarPowImplForward_cpu_Int32(
{DataType::Int32, DataType::Int32, DataType::Int32},
Aidge::PowImpl_cpu_forward_kernel<int, int, int>);
static Registrar<PowImplForward_cpu> registrarPowImplForward_cpu_Float64(
{DataType::Float64, DataType::Float64, DataType::Float64},
Aidge::PowImpl_cpu_forward_kernel<double, double, double>);
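
// A possible way to address the TODO above (a sketch, not part of this
// commit): std::pow accepts mixed arithmetic operands, so the kernel
// template already instantiates for mixed types, and a pow(float, int)
// variant would only need its own registration. The registrar name and
// type triple here are hypothetical.
static Registrar<PowImplForward_cpu> registrarPowImplForward_cpu_Float32Int32(
        {DataType::Float32, DataType::Int32, DataType::Float32},
        Aidge::PowImpl_cpu_forward_kernel<float, int, float>);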
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_OPERATOR_POWIMPL_FORWARD_KERNEL_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <cassert>
#include <vector>

#include "aidge/operator/Pow.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/operator/PowImpl.hpp"
#include "aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp"
Aidge::NbElts_t Aidge::PowImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
    // This operator can run in-place: the output may overwrite the inputs,
    // so no input element needs to be protected.
    return 0;
}
void Aidge::PowImpl_cpu::forward() {
    assert(mOp.getInput(0) && "missing input #0");
    assert(mOp.getInput(1) && "missing input #1");

    // Find the kernel registered for the input/output data types
    auto kernelFunc = Registrar<PowImplForward_cpu>::create({
        mOp.getInput(0)->dataType(),
        mOp.getInput(1)->dataType(),
        mOp.getOutput(0)->dataType()});

    // Call kernel
    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
               mOp.getInput(0)->getImpl()->rawPtr(),
               mOp.getInput(1)->getImpl()->rawPtr(),
               mOp.getOutput(0)->getImpl()->rawPtr());
}
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>

#include <memory>

#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Pow.hpp"
#include "aidge/backend/cpu.hpp"

using namespace Aidge;
TEST_CASE("[cpu/operator] Pow(forward)") {
SECTION("2D Tensor") {
std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
{
{0.42139274, 0.51524192},
{0.85247433, 0.13432795}
}
});
std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array2D<float,1,1>{{2.0}});
std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
{
{0.17757183, 0.26547423},
{0.72671247, 0.01804400}
}
});
std::shared_ptr<Node> myPow = Pow();
myPow->getOperator()->setDatatype(DataType::Float32);
myPow->getOperator()->setBackend("cpu");
myPow->getOperator()->associateInput(0, input_1);
myPow->getOperator()->associateInput(1, input_2);
myPow->getOperator()->computeOutputDims();
myPow->forward();
float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
for (std::size_t i = 0; i < 4; ++i) {
REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
}
}
SECTION("4D Tensor") {
std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
{
{
{{0.80191749, 0.45388508, 0.86550850},
{0.47226250, 0.55809456, 0.59451854},
{0.45497441, 0.02653158, 0.44041735}},
{{0.30726379, 0.73146582, 0.46462774},
{0.30268502, 0.78075552, 0.65154958},
{0.91332406, 0.62448132, 0.53238851}},
{{0.13917381, 0.43061519, 0.30198061},
{0.12880909, 0.08995515, 0.29609048},
{0.46449280, 0.47559714, 0.24193990}}
},
{
{{0.87349969, 0.51625526, 0.16921073},
{0.95035923, 0.10066575, 0.56729180},
{0.84686232, 0.05965143, 0.03635806}},
{{0.61107808, 0.59954077, 0.45627308},
{0.84114522, 0.77186388, 0.37427086},
{0.13415480, 0.00617349, 0.84260136}},
{{0.55090177, 0.57292056, 0.29158932},
{0.67131883, 0.96988875, 0.69545972},
{0.80979776, 0.18238151, 0.19527155}}
}
}
});
std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array2D<float,1,1>{{2.0}});
std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
{
{
{{6.43071651e-01, 2.06011668e-01, 7.49104977e-01},
{2.23031864e-01, 3.11469525e-01, 3.53452295e-01},
{2.07001716e-01, 7.03924568e-04, 1.93967447e-01}},
{{9.44110379e-02, 5.35042226e-01, 2.15878934e-01},
{9.16182250e-02, 6.09579206e-01, 4.24516857e-01},
{8.34160864e-01, 3.89976919e-01, 2.83437520e-01}},
{{1.93693489e-02, 1.85429439e-01, 9.11922902e-02},
{1.65917836e-02, 8.09192937e-03, 8.76695737e-02},
{2.15753555e-01, 2.26192638e-01, 5.85349165e-02}}
},
{
{{7.63001740e-01, 2.66519487e-01, 2.86322720e-02},
{9.03182685e-01, 1.01335924e-02, 3.21819991e-01},
{7.17175782e-01, 3.55829368e-03, 1.32190844e-03}},
{{3.73416424e-01, 3.59449148e-01, 2.08185121e-01},
{7.07525253e-01, 5.95773816e-01, 1.40078679e-01},
{1.79975089e-02, 3.81119971e-05, 7.09977031e-01}},
{{3.03492755e-01, 3.28237981e-01, 8.50243345e-02},
{4.50668961e-01, 9.40684199e-01, 4.83664215e-01},
{6.55772448e-01, 3.32630165e-02, 3.81309800e-02}}
}
}
});
std::shared_ptr<Node> myPow = Pow();
myPow->getOperator()->setDatatype(DataType::Float32);
myPow->getOperator()->setBackend("cpu");
myPow->getOperator()->associateInput(0, input_1);
myPow->getOperator()->associateInput(1, input_2);
myPow->getOperator()->computeOutputDims();
myPow->forward();
float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
for (std::size_t i = 0; i < 54; ++i) {
REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
}
}
}
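
// Sketch of a possible additional test (not part of this commit) covering the
// Int32 kernel registered in PowImpl_forward_kernels.hpp. It mirrors the float
// tests above and assumes Array2D supports int elements; small exact values
// are used so the double-precision std::pow result truncates cleanly to int.
TEST_CASE("[cpu/operator] Pow(forward) Int32") {
    std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<int,2,2> {
        {
            {2, 3},
            {4, 5}
        }
    });
    std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array2D<int,1,1>{{2}});
    std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<int,2,2> {
        {
            {4, 9},
            {16, 25}
        }
    });

    std::shared_ptr<Node> myPow = Pow();
    myPow->getOperator()->setDatatype(DataType::Int32);
    myPow->getOperator()->setBackend("cpu");
    myPow->getOperator()->associateInput(0, input_1);
    myPow->getOperator()->associateInput(1, input_2);
    myPow->getOperator()->computeOutputDims();
    myPow->forward();

    int* resPtr = static_cast<int*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
    int* expectedPtr = static_cast<int*>(expectedOutput->getImpl()->rawPtr());
    for (std::size_t i = 0; i < 4; ++i) {
        REQUIRE(resPtr[i] == expectedPtr[i]);
    }
}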