Skip to content
Snippets Groups Projects
Commit c4e12eee authored by Houssem ROUIS's avatar Houssem ROUIS
Browse files

Add support for same-size tensors in the Div and Pow CPU operators

parent 6bb7ae3d
No related branches found
No related tags found
1 merge request!19Binary operators
......@@ -24,10 +24,10 @@ namespace Aidge {
// compute kernel registry for forward and backward
// Kernel registries for the CPU Div operator (forward and backward).
// Entries are keyed by the (input #0, input #1, output) data types.
// The registered kernel signature carries BOTH input element counts so an
// implementation can dispatch element-wise (same-size) or scalar (size-1)
// division — the diff had left the old single-size declaration alongside
// the new one; only the two-size form is kept.
class DivImplForward_cpu
    : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
};
class DivImplBackward_cpu
    : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
};
class DivImpl_cpu : public OperatorImpl {
......
......@@ -13,13 +13,13 @@
#define AIDGE_CPU_OPERATOR_DIVIMPL_FORWARD_KERNEL_H_
#include "aidge/utils/Registrar.hpp"
#include <cmath>
#include <iostream>
#include "aidge/backend/cpu/operator/DivImpl.hpp"
namespace Aidge {
/// @brief CPU forward kernel for element-wise division.
/// @tparam I1 element type of input #0 (dividend)
/// @tparam I2 element type of input #1 (divisor)
/// @tparam O  element type of the output
/// @param input1Length number of elements in input #0 (and in the output)
/// @param input2Length number of elements in input #1; must be either equal
///        to input1Length (element-wise) or 1 (divide by a single scalar)
/// @param input1_ raw pointer to input #0 data
/// @param input2_ raw pointer to input #1 data
/// @param output_ raw pointer to pre-allocated output data
template <class I1, class I2, class O>
void DivImpl_cpu_forward_kernel(std::size_t input1Length,
                                std::size_t input2Length,
                                const void* input1_,
                                const void* input2_,
                                void* output_) {

    const I1* input_1 = static_cast<const I1*>(input1_);
    const I2* input_2 = static_cast<const I2*>(input2_);
    O* output = static_cast<O*>(output_);

    if (input2Length == input1Length) {
        // Same-size tensors: divide element by element.
        for (std::size_t i = 0; i < input1Length; ++i) {
            output[i] = input_1[i] / input_2[i];
        }
    } else if (input2Length == 1) {
        // Singleton divisor: divide every element by the single value.
        for (std::size_t i = 0; i < input1Length; ++i) {
            output[i] = input_1[i] / input_2[0];
        }
    }
    // NOTE(review): any other size combination leaves `output` untouched;
    // the caller (DivImpl_cpu::forward) asserts these are the only two cases.
}
namespace {
// TODO: add support for Div(float, int)
static Registrar<DivImplForward_cpu> registrarDivImplForward_cpu_Float32(
{DataType::Float32, DataType::Float32, DataType::Float32},
Aidge::DivImpl_cpu_forward_kernel<float, float, float>);
......
......@@ -24,10 +24,10 @@ namespace Aidge {
// compute kernel registry for forward and backward
// Kernel registries for the CPU Pow operator (forward and backward).
// Entries are keyed by the (input #0, input #1, output) data types.
// The registered kernel signature carries BOTH input element counts so an
// implementation can dispatch element-wise (same-size) or scalar (size-1)
// exponentiation — the diff had left the old single-size declaration
// alongside the new one; only the two-size form is kept.
class PowImplForward_cpu
    : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
};
class PowImplBackward_cpu
    : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
};
class PowImpl_cpu : public OperatorImpl {
......
......@@ -14,12 +14,13 @@
#include "aidge/utils/Registrar.hpp"
#include <cmath>
#include <iostream>
#include "aidge/backend/cpu/operator/PowImpl.hpp"
namespace Aidge {
/// @brief CPU forward kernel for element-wise exponentiation.
/// @tparam I1 element type of input #0 (base)
/// @tparam I2 element type of input #1 (exponent)
/// @tparam O  element type of the output
/// @param input1Length number of elements in input #0 (and in the output)
/// @param input2Length number of elements in input #1; must be either equal
///        to input1Length (element-wise) or 1 (single scalar exponent)
/// @param input1_ raw pointer to input #0 data
/// @param input2_ raw pointer to input #1 data
/// @param output_ raw pointer to pre-allocated output data
template <class I1, class I2, class O>
void PowImpl_cpu_forward_kernel(std::size_t input1Length,
                                std::size_t input2Length,
                                const void* input1_,
                                const void* input2_,
                                void* output_) {

    const I1* input_1 = static_cast<const I1*>(input1_);
    const I2* input_2 = static_cast<const I2*>(input2_);
    O* output = static_cast<O*>(output_);

    if (input2Length == input1Length) {
        // Same-size tensors: raise each base element to its own exponent.
        for (std::size_t i = 0; i < input1Length; ++i) {
            output[i] = std::pow(input_1[i], input_2[i]);
        }
    } else if (input2Length == 1) {
        // Singleton exponent: raise every element to the single value.
        for (std::size_t i = 0; i < input1Length; ++i) {
            output[i] = std::pow(input_1[i], input_2[0]);
        }
    }
    // NOTE(review): any other size combination leaves `output` untouched;
    // the caller (PowImpl_cpu::forward) asserts these are the only two cases.
}
namespace {
// TODO: add support for pow(float, int)
static Registrar<PowImplForward_cpu> registrarPowImplForward_cpu_Float32(
{DataType::Float32, DataType::Float32, DataType::Float32},
Aidge::PowImpl_cpu_forward_kernel<float, float, float>);
......
......@@ -30,6 +30,11 @@ void Aidge::DivImpl_cpu::forward() {
assert(mOp.getInput(0) && "missing input #0");
assert(mOp.getInput(1) && "missing input #1");
// TODO add support for when input1 is a 1d tensor of size the channels of input0
assert(((mOp.getInput(1)->size() == 1) ||
(mOp.getInput(1)->size() == mOp.getInput(0)->size())) &&
"input #1 must either be a tensor of size 1 or the same size of input #0");
// Find the correct kernel type
auto kernelFunc = Registrar<DivImplForward_cpu>::create({
mOp.getInput(0)->dataType(),
......@@ -38,6 +43,7 @@ void Aidge::DivImpl_cpu::forward() {
// Call kernel
kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
std::static_pointer_cast<Tensor>(mOp.getInput(1))->size(),
mOp.getInput(0)->getImpl()->rawPtr(),
mOp.getInput(1)->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
......
......@@ -29,6 +29,11 @@ Aidge::NbElts_t Aidge::PowImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_
void Aidge::PowImpl_cpu::forward() {
assert(mOp.getInput(0) && "missing input #0");
assert(mOp.getInput(1) && "missing input #1");
// TODO add support for when input1 is a 1d tensor of size the channels of input0
assert(((mOp.getInput(1)->size() == 1) ||
(mOp.getInput(1)->size() == mOp.getInput(0)->size())) &&
"input #1 must either be a tensor of size 1 or the same size of input #0");
// Find the correct kernel type
auto kernelFunc = Registrar<PowImplForward_cpu>::create({
......@@ -38,6 +43,7 @@ void Aidge::PowImpl_cpu::forward() {
// Call kernel
kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
std::static_pointer_cast<Tensor>(mOp.getInput(1))->size(),
mOp.getInput(0)->getImpl()->rawPtr(),
mOp.getInput(1)->getImpl()->rawPtr(),
mOp.getOutput(0)->getImpl()->rawPtr());
......
......@@ -21,7 +21,7 @@
using namespace Aidge;
TEST_CASE("[cpu/operator] Div(forward)") {
SECTION("2D Tensor") {
SECTION("2D Tensor by Singleton") {
std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
{
{0.07607108, 0.44075000},
......@@ -52,6 +52,42 @@ TEST_CASE("[cpu/operator] Div(forward)") {
}
SECTION("2D Tensors") {
    // Element-wise division of two 2x2 tensors with identical shapes.
    std::shared_ptr<Tensor> numerator = std::make_shared<Tensor>(Array2D<float,2,2> {
        {
            {0.79780143, 0.49322051},
            {0.84239346, 0.83737719}
        }
    });
    std::shared_ptr<Tensor> denominator = std::make_shared<Tensor>(Array2D<float,2,2>{
        {
            {0.59088874, 0.78858775},
            {0.42879432, 0.17615074}
        }
    });
    std::shared_ptr<Tensor> expected = std::make_shared<Tensor>(Array2D<float,2,2> {
        {
            {1.35017204, 0.62544787},
            {1.96456301, 4.75375366}
        }
    });

    std::shared_ptr<Node> divNode = Div();
    auto op = divNode->getOperator();
    op->setDatatype(DataType::Float32);
    op->setBackend("cpu");
    op->associateInput(0, numerator);
    op->associateInput(1, denominator);
    op->computeOutputDims();
    divNode->forward();

    const float* outPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
    const float* refPtr = static_cast<float*>(expected->getImpl()->rawPtr());
    for (std::size_t idx = 0; idx < 4; ++idx) {
        REQUIRE(std::abs(outPtr[idx] - refPtr[idx]) < 0.00001);
    }
}
SECTION("4D Tensor") {
std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
{
......
......@@ -21,7 +21,7 @@
using namespace Aidge;
TEST_CASE("[cpu/operator] Pow(forward)") {
SECTION("2D Tensor") {
SECTION("2D Tensor by Singleton") {
std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
{
{0.42139274, 0.51524192},
......@@ -52,6 +52,42 @@ TEST_CASE("[cpu/operator] Pow(forward)") {
}
SECTION("2D Tensors") {
    // Element-wise exponentiation of two 2x2 tensors with identical shapes.
    std::shared_ptr<Tensor> base = std::make_shared<Tensor>(Array2D<float,2,2> {
        {
            {0.79780143, 0.49322051},
            {0.84239346, 0.83737719}
        }
    });
    std::shared_ptr<Tensor> exponent = std::make_shared<Tensor>(Array2D<float,2,2>{
        {
            {0.59088874, 0.78858775},
            {0.42879432, 0.17615074}
        }
    });
    std::shared_ptr<Tensor> expected = std::make_shared<Tensor>(Array2D<float,2,2> {
        {
            {0.87504572, 0.57271165},
            {0.92909741, 0.96922028}
        }
    });

    std::shared_ptr<Node> powNode = Pow();
    auto op = powNode->getOperator();
    op->setDatatype(DataType::Float32);
    op->setBackend("cpu");
    op->associateInput(0, base);
    op->associateInput(1, exponent);
    op->computeOutputDims();
    powNode->forward();

    const float* outPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
    const float* refPtr = static_cast<float*>(expected->getImpl()->rawPtr());
    for (std::size_t idx = 0; idx < 4; ++idx) {
        REQUIRE(std::abs(outPtr[idx] - refPtr[idx]) < 0.00001);
    }
}
SECTION("4D Tensor") {
std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
{
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment