Commit aa10278f authored by Maxence Naud

Fix tests gather/slice/horizontalTiling

parent f0fd3c3a
2 merge requests: !152 Update Aidge export to take a graph view as an argument instead of a..., !93 Change Gather and Slice's attributes into inputs
Pipeline #44669 canceled
@@ -22,11 +22,9 @@
void Aidge::Gather_OpImpl::forward() {
const Gather_Op& op = dynamic_cast<const Gather_Op&>(mOp);
- const auto axis = op.template getAttr<std::int64_t>("Axis");
+ const auto axis = op.template getAttr<std::int8_t>("Axis");
- const std::size_t axisIdx = axis>=0 ?
-     axis :
-     static_cast<std::size_t>(axis) + op.getInput(0)->dims().size();
+ const std::size_t axisIdx = static_cast<std::size_t>(axis) + (axis >= 0 ? 0 : op.getInput(0)->dims().size());
std::size_t postAxisElems = 1;
for (std::size_t i = axisIdx + 1; i < op.getInput(0)->dims().size(); ++i) {
@@ -37,13 +35,15 @@ void Aidge::Gather_OpImpl::forward() {
preAxisElems *= op.getInput(0)->dims()[i];
}
- const auto indices = op.template getAttr<std::vector<std::int64_t>>("Indices");
+ const auto indices = static_cast<const int*>(op.getInput(1)->getImpl()->rawPtr());
std::size_t outputOffset = 0;
for (std::size_t i=0; i<preAxisElems; ++i)
{
- for(std::size_t j=0; j<indices.size(); ++j)
+ for(std::size_t j=0; j<op.getInput(1)->size(); ++j)
{
- const std::size_t idx = indices[j] >= 0 ? indices[j] : static_cast<std::size_t>(indices[j]) + op.getInput(0)->dims()[axisIdx];
+ const std::size_t idx = indices[j] >= 0 ?
+     static_cast<std::size_t>(indices[j]) :
+     static_cast<std::size_t>(indices[j] + static_cast<int>(op.getInput(0)->dims()[axisIdx]));
op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i * postAxisElems * op.getInput(0)->dims()[axisIdx] + idx * postAxisElems), postAxisElems, outputOffset);
outputOffset += postAxisElems;
}
@@ -69,7 +69,7 @@ bool Aidge::Gather_Op::forwardDims(bool /*allowDataDependency*/) {
this->template getAttr<GatherAttr::Axis>():
this->template getAttr<GatherAttr::Axis>()+outDims.size();
outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
- if( indicesDims[0]>0 ) // In case indices is a scalar indicesDims is a 0
+ if( indicesDims[0]>0 ) // In case indices is a scalar indicesDims is a 0
{
outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx), indicesDims.begin(),indicesDims.end());
}
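For reference, the element-copy logic of the new Gather_OpImpl::forward() above boils down to the following standalone sketch (plain C++ only, with an illustrative name gatherAlongAxis; it does not call the Aidge API). Negative indices are normalised against the size of the gathered axis before computing the flat input offset, as in the hunk above.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Standalone sketch: gather slices of a flat buffer along `axis` of a tensor
// with dimensions `dims`, normalizing negative indices like forward() above.
std::vector<int> gatherAlongAxis(const std::vector<int>& in,
                                 const std::vector<std::size_t>& dims,
                                 const std::vector<std::int64_t>& indices,
                                 std::size_t axis) {
    std::size_t postAxisElems = 1;
    for (std::size_t i = axis + 1; i < dims.size(); ++i) { postAxisElems *= dims[i]; }
    std::size_t preAxisElems = 1;
    for (std::size_t i = 0; i < axis; ++i) { preAxisElems *= dims[i]; }

    std::vector<int> out;
    out.reserve(preAxisElems * indices.size() * postAxisElems);
    for (std::size_t i = 0; i < preAxisElems; ++i) {
        for (std::int64_t rawIdx : indices) {
            // negative indices count from the end of the gathered axis
            const std::size_t idx = rawIdx >= 0
                ? static_cast<std::size_t>(rawIdx)
                : static_cast<std::size_t>(rawIdx + static_cast<std::int64_t>(dims[axis]));
            const std::size_t offset = i * postAxisElems * dims[axis] + idx * postAxisElems;
            out.insert(out.end(), in.begin() + offset, in.begin() + offset + postAxisElems);
        }
    }
    return out;
}

int main() {
    // 3x3 input, gather rows {1, -1} on axis 0 -> rows 1 and 2
    const std::vector<int> in = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    for (int v : gatherAlongAxis(in, {3, 3}, {1, -1}, 0)) { std::cout << v << ' '; }
    std::cout << '\n'; // prints: 4 5 6 7 8 9
    return 0;
}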
@@ -34,32 +34,35 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
const Aidge::DimIdx_t axis,
const std::size_t nbSlices)
{
// for now, Tiling works only with Conv Operators
if (node->getOperator()->type() != "Conv") {
AIDGE_INTERNAL_ASSERT("Operator should be a Convolution.");
}
- AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
+ // TODO: back when tiling works with other Operators
+ // AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
const auto& op = std::static_pointer_cast<OperatorTensor>(node->getOperator());
- if (op->nbOutputs() != 1 || op->nbData() > 1) {
-     AIDGE_INTERNAL_ASSERT("Only slice Operators with one output and at most one input for now.");
- }
+ // TODO: back when tiling works with other Operators
+ // if (op->nbOutputs() != 1 || op->nbData() > 1) {
+ //     AIDGE_INTERNAL_ASSERT("Only slice Operators with one output and at most one input for now.");
+ // }
if (!op->dimsForwarded()) {
AIDGE_INTERNAL_ASSERT("Dimensions must be forwarded before any tiling");
}
- const std::shared_ptr<Tensor>& outTensor = op->getOutput(0);
- std::vector<DimSize_t> outputDims = outTensor->dims();
+ // start by doing a tiling with strict dimensions division
+ const auto& outTensor = op->getOutput(0);
+ if (op->getOutput(0)->dims()[axis] % nbSlices != 0) {
- if (outputDims[axis] % nbSlices != 0) {
AIDGE_INTERNAL_ASSERT("axis should be a multiple of nbSlices");
}
+ // dimensions of a Slice
+ std::vector<DimSize_t> outputDims = outTensor->dims();
outputDims[axis] /= nbSlices;
- std::vector<DimSize_t> currentFirstDims = std::vector<DimSize_t>(outTensor->nbDims(), 0);
- std::set<std::shared_ptr<Aidge::Node>> res;
auto concat = Concat(nbSlices, axis);
- res.insert(concat);
+ std::set<std::shared_ptr<Aidge::Node>> tiledOperator{concat};
// check slice sizes
// const auto inputDims = op->computeReceptiveField(currentFirstDims[axis], outputDims, 0);
@@ -73,10 +76,13 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
std::vector<std::shared_ptr<Node>> clonedInputs = std::vector<std::shared_ptr<Node>>(node->nbInputs(), nullptr);
for (std::size_t i = node->nbData(); i < node ->nbInputs(); ++i) {
clonedInputs[i] = node -> getParent(i) -> cloneSharedOperators();
- clonedInputs[i] -> setName(node -> name() + "_0");
- res.insert(clonedInputs[i]);
+ clonedInputs[i] -> setName(node -> getParent(i) -> name() + "_0");
+ tiledOperator.insert(clonedInputs[i]);
}
+ const std::vector<std::string> sliceInputsNames = Slice_Op::getInputsName();
+ // coordinates of the first value of the current output slice
+ std::vector<DimSize_t> currentFirstDims = std::vector<DimSize_t>(outTensor->nbDims(), 0);
for (IOIndex_t i = 0; currentFirstDims[axis] < outTensor->dims()[axis]; currentFirstDims[axis] += outputDims[axis], ++i) {
const auto inputDims = op->computeReceptiveField(currentFirstDims, outputDims, 0);
auto newNode = node -> clone(); // no input associated to clones
@@ -86,7 +92,6 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
auto backend = outTensor->getImpl()->backend();
auto slice = Slice("Slice_" + std::to_string(currentFirstDims[axis]));
- auto sliceInputsNames = slice->getOperator()->getInputsName();
// Create Slice's Starts producer node
std::vector<std::int64_t> inputDimsStart(inputDims[0].first.size());
for (std::size_t dim = 0; dim < inputDimsStart.size(); ++dim) {
@@ -96,8 +101,8 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
starts -> setDataType(DataType::Int64);
starts -> setBackend(backend);
starts -> resize(std::vector<std::size_t>({inputDimsStart.size()}));
- starts -> getImpl() -> setRawPtr(inputDimsStart.data(), inputDimsStart.size());
- auto startsNode = Producer(starts, sliceInputsNames[1]);
+ starts -> getImpl() -> copyFromHost(inputDimsStart.data(), inputDimsStart.size());
+ auto startsNode = Producer(starts, slice->name() + sliceInputsNames[1]);
startsNode -> addChild(slice, 0, 1);
std::vector<std::int64_t> inputDimsEnd(inputDims[0].first.size());
@@ -108,8 +113,8 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
ends -> setDataType(DataType::Int64);
ends -> setBackend(backend);
ends -> resize(std::vector<std::size_t>({inputDimsEnd.size()}));
- ends -> getImpl() -> setRawPtr(inputDimsEnd.data(), inputDimsEnd.size());
- auto endsNode = Producer(ends, sliceInputsNames[2]);
+ ends -> getImpl() -> copyFromHost(inputDimsEnd.data(), inputDimsEnd.size());
+ auto endsNode = Producer(ends, slice->name() + sliceInputsNames[2]);
endsNode -> addChild(slice, 0, 2);
// Create Slice's Axes producer node
@@ -120,16 +125,15 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
axes -> setDataType(DataType::Int64);
axes -> setBackend(backend);
axes -> resize(std::vector<std::size_t>({usedDims.size()}));
- axes -> getImpl() -> setRawPtr(usedDims.data(), usedDims.size());
- auto axesNode = Producer(axes, sliceInputsNames[3]);
+ axes -> getImpl() -> copyFromHost(usedDims.data(), usedDims.size());
+ auto axesNode = Producer(axes, slice->name() + sliceInputsNames[3]);
axesNode -> addChild(slice, 0, 3);
slice -> addChild(newNode, 0, 0);
newNode -> addChild(concat, 0, i);
- res.insert(slice);
- res.insert(newNode);
+ tiledOperator.insert({slice, newNode, startsNode, endsNode, axesNode});
}
- return res;
+ return tiledOperator;
}
\ No newline at end of file
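The tiling loop above walks the output tensor along `axis` in steps of outputDims[axis], i.e. the full axis size divided by nbSlices (the recipe asserts that this division is exact). A minimal standalone sketch of that splitting arithmetic, with an illustrative helper name sliceStartsAlongAxis and no Aidge calls:

#include <cstddef>
#include <iostream>
#include <vector>

// Standalone sketch: split an output axis into nbSlices equal parts and
// return the start coordinate of each slice along that axis.
std::vector<std::size_t> sliceStartsAlongAxis(std::size_t outDimOnAxis, std::size_t nbSlices) {
    const std::size_t sliceSize = outDimOnAxis / nbSlices; // assumed exact, as asserted above
    std::vector<std::size_t> starts;
    for (std::size_t first = 0; first < outDimOnAxis; first += sliceSize) {
        starts.push_back(first);
    }
    return starts;
}

int main() {
    // an output axis of size 12 cut into 3 horizontal slices starts at 0, 4 and 8
    for (std::size_t s : sliceStartsAlongAxis(12, 3)) { std::cout << s << ' '; }
    std::cout << '\n'; // prints: 0 4 8
    return 0;
}

Each start coordinate then becomes one iteration of the loop over currentFirstDims, which feeds a Slice node through its Starts, Ends and Axes Producer nodes as shown in the hunks above.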
@@ -14,12 +14,13 @@
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Gather.hpp"
#include <cstdint>
#include <memory>
using namespace Aidge;
TEST_CASE("[cpu/operator] Gather(forward)") {
TEST_CASE("[cpu/operator] Gather(forward)", "[Gather][CPU]") {
SECTION("2D Tensor axis 0") {
std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<int,3,3> {
{
@@ -42,10 +43,10 @@ TEST_CASE("[cpu/operator] Gather(forward)") {
}
});
- std::shared_ptr<Node> myGather = Gather({1, 2}, {1, 2}, 0);
+ std::shared_ptr<Node> myGather = Gather(std::int8_t(0));
auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator());
op->associateInput(0,input);
- // op->associateInput(1,indexes);
+ op->associateInput(1,indexes);
op->setDataType(DataType::Int32);
op->setBackend("cpu");
myGather->forward();
@@ -82,10 +83,10 @@ TEST_CASE("[cpu/operator] Gather(forward)") {
}
});
- std::shared_ptr<Node> myGather = Gather({0, 2}, {1, 2}, 1);
+ std::shared_ptr<Node> myGather = Gather(1);
auto op = std::static_pointer_cast<OperatorTensor>(myGather -> getOperator());
op->associateInput(0,input);
- // op->associateInput(1,indexes);
+ op->associateInput(1,indexes);
op->setDataType(DataType::Int32);
op->setBackend("cpu");
myGather->forward();
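The expected shapes in the Gather tests above follow the rule implemented by Gather_Op::forwardDims() earlier in this commit: the gathered axis is removed and replaced by the dimensions of the indices tensor (a scalar index adds no dimension). A standalone sketch of that shape rule, with an illustrative name gatherOutputDims and plain std::vector types:

#include <cstddef>
#include <iostream>
#include <vector>

// Standalone sketch of the Gather output-shape rule used by forwardDims():
// erase the gathered axis, then insert the indices dimensions in its place.
std::vector<std::size_t> gatherOutputDims(std::vector<std::size_t> inDims,
                                          const std::vector<std::size_t>& indicesDims,
                                          std::size_t axis) {
    inDims.erase(inDims.begin() + axis);
    if (!indicesDims.empty() && indicesDims[0] > 0) { // scalar indices add no dimension
        inDims.insert(inDims.begin() + axis, indicesDims.begin(), indicesDims.end());
    }
    return inDims;
}

int main() {
    // a 3x3 input gathered on axis 0 with a (1,2) indices tensor gives a (1,2,3) output
    for (std::size_t d : gatherOutputDims({3, 3}, {1, 2}, 0)) { std::cout << d << ' '; }
    std::cout << '\n'; // prints: 1 2 3
    return 0;
}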
@@ -19,15 +19,21 @@ using namespace Aidge;
TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
SECTION("1D Tensor") {
std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
- {0, 1, 2,-3, 4,-5,-6, 7, 8, 9}
+ {0, 1, -2,-3, 4,-5,-6, 7, 8, 9}
});
- std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,4> {
-     {0, 1, 2,-3}
+ std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,3> {
+     {0, 1, -2}
});
+ std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,1>{{0}});
+ std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,1>{{3}});
+ std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,1>{{0}});
- std::shared_ptr<Node> mySlice = Slice({0}, {3}, {0});
+ std::shared_ptr<Node> mySlice = Slice();
auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
mySlice->getOperator()->associateInput(0,input0);
+ mySlice->getOperator()->associateInput(1,starts);
+ mySlice->getOperator()->associateInput(2,ends);
+ mySlice->getOperator()->associateInput(3,axes);
mySlice->getOperator()->setDataType(DataType::Int32);
mySlice->getOperator()->setBackend("cpu");
mySlice->forward();
@@ -50,10 +56,16 @@ TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
{-5,-6, 7}
}
});
+ std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,2>{{0,5}});
+ std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,2>{{2,8}});
+ std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,2>{{0,1}});
- std::shared_ptr<Node> mySlice = Slice({0,5}, {1,7}, {0,1});
+ std::shared_ptr<Node> mySlice = Slice();
auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
mySlice->getOperator()->associateInput(0,input0);
+ mySlice->getOperator()->associateInput(1,starts);
+ mySlice->getOperator()->associateInput(2,ends);
+ mySlice->getOperator()->associateInput(3,axes);
mySlice->getOperator()->setDataType(DataType::Int32);
mySlice->getOperator()->setBackend("cpu");
mySlice->forward();
@@ -83,10 +95,16 @@ TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
}
}
});
+ std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,3>{{0,1,4}});
+ std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,3>{{1,2,7}});
+ std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,3>{{0,1,2}});
- std::shared_ptr<Node> mySlice = Slice({0,1,4}, {0,1,6}, {0,1,2});
+ std::shared_ptr<Node> mySlice = Slice();
auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
mySlice->getOperator()->associateInput(0,input0);
+ mySlice->getOperator()->associateInput(1,starts);
+ mySlice->getOperator()->associateInput(2,ends);
+ mySlice->getOperator()->associateInput(3,axes);
mySlice->getOperator()->setDataType(DataType::Int32);
mySlice->getOperator()->setBackend("cpu");
mySlice->forward();
@@ -145,10 +163,16 @@ TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
}
}
});
+ std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,4>{{0,0,0,0}});
+ std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,4>{{2,2,2,10}});
+ std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,4>{{0,1,2,3}});
- std::shared_ptr<Node> mySlice = Slice({0,0,0,0}, {1,1,1,9}, {0,1,2,3});
+ std::shared_ptr<Node> mySlice = Slice();
auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
mySlice->getOperator()->associateInput(0,input0);
+ mySlice->getOperator()->associateInput(1,starts);
+ mySlice->getOperator()->associateInput(2,ends);
+ mySlice->getOperator()->associateInput(3,axes);
mySlice->getOperator()->setDataType(DataType::Int32);
mySlice->getOperator()->setBackend("cpu");
mySlice->forward();
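In these Slice tests the expected output shape follows directly from the starts/ends inputs: each sliced axis keeps ends[i] - starts[i] elements. A standalone sketch of that arithmetic, with an illustrative name slicedDims; the 2x10 input in the second call is hypothetical and only serves to illustrate the 2D case above:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Standalone sketch: compute the sliced dimensions from starts/ends/axes,
// assuming non-negative bounds within range, as in the test values above.
std::vector<std::size_t> slicedDims(std::vector<std::size_t> dims,
                                    const std::vector<std::int64_t>& starts,
                                    const std::vector<std::int64_t>& ends,
                                    const std::vector<std::size_t>& axes) {
    for (std::size_t i = 0; i < axes.size(); ++i) {
        dims[axes[i]] = static_cast<std::size_t>(ends[i] - starts[i]);
    }
    return dims;
}

int main() {
    // 1D case above: starts {0}, ends {3} on a 10-element tensor -> 3 values {0, 1, -2}
    std::cout << slicedDims({10}, {0}, {3}, {0})[0] << '\n'; // prints: 3
    // hypothetical 2x10 input with starts {0,5}, ends {2,8}, axes {0,1} -> a 2x3 block
    const std::vector<std::size_t> d2 = slicedDims({2, 10}, {0, 5}, {2, 8}, {0, 1});
    std::cout << d2[0] << 'x' << d2[1] << '\n'; // prints: 2x3
    return 0;
}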