Skip to content
Snippets Groups Projects
Commit 5506a972 authored by Houssem ROUIS's avatar Houssem ROUIS
Browse files

Add support for elementwise operations on tensors with different dims

parent c4e12eee
No related branches found
No related tags found
1 merge request!19Binary operators
...@@ -39,6 +39,13 @@ void DivImpl_cpu_forward_kernel(std::size_t input1Length, ...@@ -39,6 +39,13 @@ void DivImpl_cpu_forward_kernel(std::size_t input1Length,
output[i] = input_1[i] / input_2[0]; output[i] = input_1[i] / input_2[0];
} }
} }
else // input_2 is 1d and of size the number of channels of input_1
{
// Broadcast input_2 across input_1: element i of input_1 is divided by
// element (i % input2Length) of input_2, so input_2 repeats along the
// innermost dimension. NOTE(review): this pairing is only "per channel"
// if the channel dimension is the last one — confirm with the layout
// assumed by callers (forward() checks size against dims[nbDims-1]).
for (std::size_t i = 0; i < input1Length; ++i) {
std::size_t channelIdx = i % input2Length;
output[i] = input_1[i] / input_2[channelIdx];
}
}
} }
namespace { namespace {
......
...@@ -41,6 +41,13 @@ void PowImpl_cpu_forward_kernel(std::size_t input1Length, ...@@ -41,6 +41,13 @@ void PowImpl_cpu_forward_kernel(std::size_t input1Length,
output[i] = std::pow(input_1[i], input_2[0]); output[i] = std::pow(input_1[i], input_2[0]);
} }
} }
else // input_2 is 1d and of size the number of channels of input_1
{
// Broadcast input_2 across input_1: element i of input_1 is raised to
// the power of element (i % input2Length) of input_2, so input_2 repeats
// along the innermost dimension. NOTE(review): this pairing is only
// "per channel" if the channel dimension is the last one — confirm with
// the layout assumed by callers (forward() checks size against
// dims[nbDims-1]).
for (std::size_t i = 0; i < input1Length; ++i) {
std::size_t channelIdx = i % input2Length;
output[i] = std::pow(input_1[i], input_2[channelIdx]);
}
}
} }
namespace { namespace {
......
...@@ -30,10 +30,11 @@ void Aidge::DivImpl_cpu::forward() { ...@@ -30,10 +30,11 @@ void Aidge::DivImpl_cpu::forward() {
assert(mOp.getInput(0) && "missing input #0"); assert(mOp.getInput(0) && "missing input #0");
assert(mOp.getInput(1) && "missing input #1"); assert(mOp.getInput(1) && "missing input #1");
// TODO add support for when input1 is a 1d tensor of size the channels of input0
assert(((mOp.getInput(1)->size() == 1) || assert(((mOp.getInput(1)->size() == 1) ||
(mOp.getInput(1)->size() == mOp.getInput(0)->size())) && (mOp.getInput(1)->size() == mOp.getInput(0)->size()) ||
"input #1 must either be a tensor of size 1 or the same size of input #0"); (mOp.getInput(1)->nbDims() == 1 && mOp.getInput(1)->size() == mOp.getInput(0)->dims()[mOp.getInput(0)->nbDims()-1])
) &&
"input #1 must either be a tensor of size 1, the number of channels of input # or the same size of input #0");
// Find the correct kernel type // Find the correct kernel type
auto kernelFunc = Registrar<DivImplForward_cpu>::create({ auto kernelFunc = Registrar<DivImplForward_cpu>::create({
......
...@@ -29,11 +29,12 @@ Aidge::NbElts_t Aidge::PowImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_ ...@@ -29,11 +29,12 @@ Aidge::NbElts_t Aidge::PowImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_
void Aidge::PowImpl_cpu::forward() { void Aidge::PowImpl_cpu::forward() {
assert(mOp.getInput(0) && "missing input #0"); assert(mOp.getInput(0) && "missing input #0");
assert(mOp.getInput(1) && "missing input #1"); assert(mOp.getInput(1) && "missing input #1");
// TODO add support for when input1 is a 1d tensor of size the channels of input0
assert(((mOp.getInput(1)->size() == 1) || assert(((mOp.getInput(1)->size() == 1) ||
(mOp.getInput(1)->size() == mOp.getInput(0)->size())) && (mOp.getInput(1)->size() == mOp.getInput(0)->size()) ||
"input #1 must either be a tensor of size 1 or the same size of input #0"); (mOp.getInput(1)->nbDims() == 1 && mOp.getInput(1)->size() == mOp.getInput(0)->dims()[mOp.getInput(0)->nbDims()-1])
) &&
"input #1 must either be a tensor of size 1, the number of channels of input # or the same size of input #0");
// Find the correct kernel type // Find the correct kernel type
auto kernelFunc = Registrar<PowImplForward_cpu>::create({ auto kernelFunc = Registrar<PowImplForward_cpu>::create({
......
...@@ -88,6 +88,44 @@ TEST_CASE("[cpu/operator] Div(forward)") { ...@@ -88,6 +88,44 @@ TEST_CASE("[cpu/operator] Div(forward)") {
} }
SECTION("3D Tensor by 1D Tensor") {
    // input #1 is a 1D tensor whose length matches the last dimension of
    // input #0: each element of input #0 is divided by the element of
    // input #1 at the same position along that last dimension.
    std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> {
        {
            {{0.24180168, 0.44319558, 0.06437260},
             {0.21270001, 0.34570599, 0.44151264}},

            {{0.62294692, 0.98043168, 0.18628585},
             {0.33591706, 0.03432965, 0.32130069}}
        }
    });
    std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<float,3>{
        {0.63475525, 0.58620811, 0.69340748}
    });
    std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> {
        {
            {{0.38093686, 0.75603795, 0.09283517},
             {0.33508980, 0.58973253, 0.63672900}},

            {{0.98139703, 1.67249763, 0.26865280},
             {0.52920723, 0.05856223, 0.46336490}}
        }
    });

    std::shared_ptr<Node> myDiv = Div();
    myDiv->getOperator()->setDatatype(DataType::Float32);
    myDiv->getOperator()->setBackend("cpu");
    myDiv->getOperator()->associateInput(0, input_1);
    myDiv->getOperator()->associateInput(1, input_2);
    myDiv->getOperator()->computeOutputDims();
    myDiv->forward();

    float* resPtr = static_cast<float*>(myDiv->getOperator()->getOutput(0)->getImpl()->rawPtr());
    float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
    // Check every element of the 2x2x3 output (12 values), not just the
    // first 4 as the original loop did.
    for (std::size_t i = 0; i < expectedOutput->size(); ++i) {
        REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
    }
}
SECTION("4D Tensor") { SECTION("4D Tensor") {
std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> { std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
{ {
......
...@@ -52,6 +52,45 @@ TEST_CASE("[cpu/operator] Pow(forward)") { ...@@ -52,6 +52,45 @@ TEST_CASE("[cpu/operator] Pow(forward)") {
} }
SECTION("3D Tensor by 1D Tensor") {
    // input #1 is a 1D tensor whose length matches the last dimension of
    // input #0: each element of input #0 is raised to the power of the
    // element of input #1 at the same position along that last dimension.
    std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> {
        {
            {{0.87519985, 0.10536593, 0.20268351},
             {0.75532353, 0.95977652, 0.03897029}},

            {{0.67554104, 0.35499334, 0.27741563},
             {0.94270861, 0.48397779, 0.35532343}}
        }
    });
    std::shared_ptr<Tensor> input_2 = std::make_shared<Tensor>(Array1D<float,3>{
        {0.39333701, 0.08719915, 0.16713941}
    });
    std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> {
        {
            {{0.94891787, 0.82182676, 0.76584703},
             {0.89549923, 0.99642646, 0.58137459}},

            {{0.85702944, 0.91364944, 0.80709606},
             {0.97706109, 0.93867886, 0.84118503}}
        }
    });

    std::shared_ptr<Node> myPow = Pow();
    myPow->getOperator()->setDatatype(DataType::Float32);
    myPow->getOperator()->setBackend("cpu");
    myPow->getOperator()->associateInput(0, input_1);
    myPow->getOperator()->associateInput(1, input_2);
    myPow->getOperator()->computeOutputDims();
    myPow->forward();

    float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
    float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
    // Check every element of the 2x2x3 output (12 values), not just the
    // first 4 as the original loop did.
    for (std::size_t i = 0; i < expectedOutput->size(); ++i) {
        REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
    }
}
SECTION("2D Tensors") { SECTION("2D Tensors") {
std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> { std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
{ {
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment