Commit cc38a58b authored by Houssem ROUIS

move SliceImpl to backend_cpu

parent e9fdb3b6
@@ -24,11 +24,11 @@
 #include "aidge/utils/Types.h"

 namespace Aidge {
-class Slice_OpImpl : public OperatorImpl {
-public:
-    Slice_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-    void forward() override;
-};
+// class Slice_OpImpl : public OperatorImpl {
+// public:
+//     Slice_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+//     void forward() override;
+// };

 enum class SliceAttr { Starts, Ends, Axes, Steps };
@@ -50,9 +50,7 @@ public:
           attr<SliceAttr::Ends>(ends),
           attr<SliceAttr::Axes>(axes),
           attr<SliceAttr::Steps>(steps))
-    {
-        mImpl = std::make_shared<Slice_OpImpl>(*this);
-    }
+    {}

     /**
@@ -68,7 +66,7 @@ public:
             SET_IMPL_MACRO(Slice_Op, *this, op.backend());
         }
         else {
-            mImpl = std::make_shared<Slice_OpImpl>(*this);
+            mImpl = nullptr;
         }
     }
@@ -111,21 +109,21 @@ template <>
 const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "Starts", "Ends", "Axes", "Steps" };
 }
-namespace Aidge {
-class SliceImplForward
-    : public Registrable<SliceImplForward,
-                         std::tuple<DataType>,
-                         void(const Slice_Op::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
-template <typename I>
-void Slice_forward_kernel(const Slice_Op::Attrs &attrs, const std::vector<DimSize_t>&inputDims, const void *input_, void *output_);
-namespace {
-static Registrar<SliceImplForward> registrarSliceImplForward_Float32(
-    {DataType::Float32}, Slice_forward_kernel<float>);
-static Registrar<SliceImplForward> registrarSliceImplForward_Int32(
-    {DataType::Int32}, Slice_forward_kernel<int>);
-static Registrar<SliceImplForward> registrarSliceImplForward_Int64(
-    {DataType::Float64}, Slice_forward_kernel<double>);
-}
-}
+// namespace Aidge {
+// class SliceImplForward
+//     : public Registrable<SliceImplForward,
+//                          std::tuple<DataType>,
+//                          void(const Slice_Op::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+// template <typename I>
+// void Slice_forward_kernel(const Slice_Op::Attrs &attrs, const std::vector<DimSize_t>&inputDims, const void *input_, void *output_);
+// namespace {
+// static Registrar<SliceImplForward> registrarSliceImplForward_Float32(
+//     {DataType::Float32}, Slice_forward_kernel<float>);
+// static Registrar<SliceImplForward> registrarSliceImplForward_Int32(
+//     {DataType::Int32}, Slice_forward_kernel<int>);
+// static Registrar<SliceImplForward> registrarSliceImplForward_Int64(
+//     {DataType::Float64}, Slice_forward_kernel<double>);
+// }
+// }

 #endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
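With the default Slice_OpImpl gone from aidge_core, a Slice node only gets an implementation through the registrar (via SET_IMPL_MACRO), so the registration commented out above has to reappear on the backend side. A minimal sketch of what the aidge_backend_cpu counterpart might look like; class and registrar names are assumptions following the usual backend registration pattern, not code from this commit:

// Hypothetical backend-side counterpart (names assumed; not part of this diff).
#include <memory>
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/operator/Slice.hpp"
#include "aidge/utils/Registrar.hpp"

namespace Aidge {
class SliceImpl_cpu : public OperatorImpl {
public:
    SliceImpl_cpu(const Slice_Op& op) : OperatorImpl(op, "cpu") {}

    static std::unique_ptr<SliceImpl_cpu> create(const Slice_Op& op) {
        return std::make_unique<SliceImpl_cpu>(op);
    }

    void forward() override;  // would call the Slice kernel moved out of core
};

namespace {
// setBackend("cpu") resolves SET_IMPL_MACRO through this registrar entry
static Registrar<Slice_Op> registrarSliceImpl_cpu("cpu", Aidge::SliceImpl_cpu::create);
}
} // namespace Aidge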
@@ -26,92 +26,6 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/data/Data.hpp"
-#include "aidge/utils/Registrar.hpp"
-
-template<class I>
-void Aidge::Slice_forward_kernel(const Slice_Op::Attrs &attrs, const std::vector<DimSize_t>& inputDims, const void *input_, void *output_) {
-    const I* input = static_cast<const I*>(input_);
-    I* output = static_cast<I*>(output_);
-    const std::size_t nbDims = inputDims.size();
-    std::vector<DimSize_t> dims = inputDims;
-    DimSize_t totalSize = std::accumulate(inputDims.cbegin(), inputDims.cend(), std::size_t(1), std::multiplies<std::size_t>());
-    I* outputAccumulation = new I[totalSize];
-    const I* inputAccumulation = input;
-    const std::size_t nbAxes = std::get<0>(attrs).size();
-    for (std::size_t i = 0; i < nbAxes; ++i) {
-        DimIdx_t axis = std::get<2>(attrs)[i] >= 0 ?
-                            static_cast<DimIdx_t>(std::get<2>(attrs)[i]) :
-                            static_cast<DimIdx_t>(std::get<2>(attrs)[i] + static_cast<DimIdx_t>(inputDims.size()));
-        std::int64_t start = std::get<0>(attrs)[i] >= 0 ?
-                            std::get<0>(attrs)[i] :
-                            std::get<0>(attrs)[i] + static_cast<std::int64_t>(inputDims[axis]);
-        std::int64_t end = std::get<1>(attrs)[i] >= 0 ?
-                            std::get<1>(attrs)[i] :
-                            std::get<1>(attrs)[i] + static_cast<std::int64_t>(inputDims[axis]);
-        std::int64_t step = std::get<3>(attrs)[i];
-        std::size_t sliceSize = static_cast<std::size_t>((end - start) / std::abs(step));
-        if (i > 0) {
-            outputAccumulation = new I[totalSize];
-        }
-        const std::size_t stride_pre = std::accumulate(dims.cbegin(), dims.cbegin() + axis, 1, std::multiplies<std::size_t>());
-        const std::size_t stride_post = std::accumulate(dims.crbegin(), dims.crbegin() + nbDims - 1 - axis, 1, std::multiplies<std::size_t>());
-        std::int64_t firstElem = step > 0 ? start : end;
-        std::int64_t lastElem = step > 0 ? end : start;
-        for (std::size_t outer = 0; outer < stride_pre; outer++)
-        {
-            std::size_t addedSlices = 0;
-            for (std::int64_t inner = firstElem; inner < lastElem; inner += step)
-            {
-                size_t idx = outer * stride_post * dims[axis] + inner * stride_post;
-                size_t idx_out = outer * stride_post * sliceSize + addedSlices * stride_post;
-                if (idx < totalSize) {
-                    std::copy_n(std::next(inputAccumulation, idx), stride_post, std::next(outputAccumulation, idx_out));
-                }
-                addedSlices++;
-            }
-        }
-        totalSize /= dims[axis];
-        totalSize *= sliceSize;
-        dims[axis] = sliceSize;
-        if (inputAccumulation != input) {
-            delete[] inputAccumulation;
-        }
-        inputAccumulation = outputAccumulation;
-    }
-    // Copy the accumulated slice result to the output buffer
-    std::copy_n(inputAccumulation, totalSize, output);
-    // op.getOutput(0)->getImpl()->copy(inputAccumulation, totalSize);
-    if (outputAccumulation) {
-        delete[] outputAccumulation;
-    }
-}
-
-void Aidge::Slice_OpImpl::forward() {
-    const Slice_Op& op = dynamic_cast<const Slice_Op&>(mOp);
-    if (!op.getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", op.Type);
-    }
-    AIDGE_ASSERT((op.template getAttr<SliceAttr::Starts>().size() == op.template getAttr<SliceAttr::Ends>().size()) &&
-                 (op.template getAttr<SliceAttr::Starts>().size() == op.template getAttr<SliceAttr::Axes>().size()),
-                 "start, end and axes arguments should be the same size.");
-    // Find the correct kernel type
-    auto kernelFunc =
-        Registrar<SliceImplForward>::create({std::static_pointer_cast<Tensor>(op.getRawInput(0))->dataType()});
-    // Call kernel
-    kernelFunc(dynamic_cast<const Slice_Op&>(mOp).getStaticAttributes(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
-               std::static_pointer_cast<Tensor>(op.getInput(0))->getImpl()->hostPtr(),
-               std::static_pointer_cast<Tensor>(op.getOutput(0))->getImpl()->hostPtr());
-}
-
 const std::string Aidge::Slice_Op::Type = "Slice";
@@ -254,11 +168,6 @@ bool Aidge::Slice_Op::forwardDims(bool /*allowDataDependency*/) {
 }

 void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Slice_Op>::exists({name})){
-        SET_IMPL_MACRO(Slice_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Slice_OpImpl>(*this);
-    }
+    SET_IMPL_MACRO(Slice_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
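The kernel removed above slices one axis at a time: on each pass the current buffer is viewed as [stride_pre, dims[axis], stride_post], and for every index kept along that axis a contiguous run of stride_post elements is copied into a fresh accumulation buffer. A minimal standalone sketch of a single pass, assuming a positive step; names are illustrative, not Aidge API:

// Single-axis slice pass, standalone illustration (step > 0 assumed).
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<int> sliceAxis(const std::vector<int>& in,
                           const std::vector<std::size_t>& dims,
                           std::size_t axis,
                           std::int64_t start, std::int64_t end, std::int64_t step) {
    // Collapse the dimensions before/after `axis` into two strides.
    std::size_t stride_pre = 1, stride_post = 1;
    for (std::size_t d = 0; d < axis; ++d)               stride_pre  *= dims[d];
    for (std::size_t d = axis + 1; d < dims.size(); ++d) stride_post *= dims[d];
    // Number of indices kept along `axis` (ceil of the range / step).
    const std::size_t sliceSize = static_cast<std::size_t>((end - start + step - 1) / step);
    std::vector<int> out(stride_pre * sliceSize * stride_post);
    for (std::size_t outer = 0; outer < stride_pre; ++outer) {
        std::size_t added = 0;  // slices copied so far within this outer block
        for (std::int64_t inner = start; inner < end; inner += step, ++added) {
            // Copy one contiguous run of stride_post elements.
            std::copy_n(in.begin() + (outer * dims[axis] + inner) * stride_post,
                        stride_post,
                        out.begin() + (outer * sliceSize + added) * stride_post);
        }
    }
    return out;
}

For example, dims = {2, 10}, axis = 1, start = 5, end = 8, step = 1 keeps columns 5..7 of each row, matching the 2D test case further below.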
@@ -107,8 +107,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         std::iota(usedDims.begin(), usedDims.end(), static_cast<std::int8_t>(0));
         // Create Slice's Steps attribute
-        std::vector<std::int64_t> steps(inputDimsEnd.size());
-        std::iota(steps.begin(), steps.end(), static_cast<std::int64_t>(1));
+        std::vector<std::int64_t> steps(inputDimsEnd.size(), static_cast<std::int64_t>(1));
         auto slice = Slice(inputDimsStart, inputDimsEnd, usedDims, steps, "Slice_" + std::to_string(currentFirstDims[axis]));
         slice -> addChild(newNode, 0, 0);
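This tiling hunk fixes a real bug rather than merely simplifying: std::iota fills the steps vector with 1, 2, 3, ..., so every axis after the first would have been sliced with a stride greater than one, whereas the tiling Slice is meant to keep every element (step 1 on all axes). A tiny illustration of the difference:

#include <cstdint>
#include <numeric>
#include <vector>

int main() {
    std::vector<std::int64_t> before(4);                  // old code
    std::iota(before.begin(), before.end(), std::int64_t(1));
    // before == {1, 2, 3, 4}: axis 1 would step by 2, axis 2 by 3, ...

    std::vector<std::int64_t> after(4, 1);                // fixed code
    // after == {1, 1, 1, 1}: keep every element along each axis
}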
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include <catch2/catch_test_macros.hpp>
#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Slice.hpp"
using namespace Aidge;
TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
SECTION("1D Tensor") {
std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
{0, 1, -2,-3, 4,-5,-6, 7, 8, 9}
});
std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,3> {
{0, 1, -2}
});
std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,1>{{0}});
std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,1>{{3}});
std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,1>{{0}});
std::shared_ptr<Node> mySlice = Slice();
auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
mySlice->getOperator()->associateInput(0,input0);
mySlice->getOperator()->associateInput(1,starts);
mySlice->getOperator()->associateInput(2,ends);
mySlice->getOperator()->associateInput(3,axes);
mySlice->getOperator()->setDataType(DataType::Int32);
mySlice->getOperator()->setBackend("cpu");
mySlice->forward();
REQUIRE(*(op->getOutput(0)) == *expectedOutput);
REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
}
SECTION("2D Tensor") {
std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array2D<int,2,10> {
{
{ 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
{-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
}
});
std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<int,2,3> {
{
{-5,-6, 7},
{-5,-6, 7}
}
});
std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,2>{{0,5}});
std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,2>{{2,8}});
std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,2>{{0,1}});
std::shared_ptr<Node> mySlice = Slice();
auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
mySlice->getOperator()->associateInput(0,input0);
mySlice->getOperator()->associateInput(1,starts);
mySlice->getOperator()->associateInput(2,ends);
mySlice->getOperator()->associateInput(3,axes);
mySlice->getOperator()->setDataType(DataType::Int32);
mySlice->getOperator()->setBackend("cpu");
mySlice->forward();
op->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == *expectedOutput);
REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
}
SECTION("3D Tensor") {
std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array3D<int,2,2,10> {
{
{
{ 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
{-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
},
{
{ 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
{-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
}
}
});
std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,1,1,3> {
{
{
{ 4,-5,-6}
}
}
});
std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,3>{{0,1,4}});
std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,3>{{1,2,7}});
std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,3>{{0,1,2}});
std::shared_ptr<Node> mySlice = Slice();
auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
mySlice->getOperator()->associateInput(0,input0);
mySlice->getOperator()->associateInput(1,starts);
mySlice->getOperator()->associateInput(2,ends);
mySlice->getOperator()->associateInput(3,axes);
mySlice->getOperator()->setDataType(DataType::Int32);
mySlice->getOperator()->setBackend("cpu");
mySlice->forward();
// mySlice->getOperator()->output(0).print();
REQUIRE(*(op->getOutput(0)) == *expectedOutput);
REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
}
SECTION("4D Tensor") {
std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
{
{
{
{ 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
{-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
},
{
{ 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
{-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
}
},
{
{
{ 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
{-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
},
{
{ 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
{-5, 4, 2,-3,11,-5,-6, 7,-1,10}
}
}
}
});
std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
{
{
{
{ 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
{-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
},
{
{ 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
{-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
}
},
{
{
{ 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
{-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
},
{
{ 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
{-5, 4, 2,-3,11,-5,-6, 7,-1,10}
}
}
}
});
std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,4>{{0,0,0,0}});
std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,4>{{2,2,2,10}});
std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,4>{{0,1,2,3}});
std::shared_ptr<Node> mySlice = Slice();
auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
mySlice->getOperator()->associateInput(0,input0);
mySlice->getOperator()->associateInput(1,starts);
mySlice->getOperator()->associateInput(2,ends);
mySlice->getOperator()->associateInput(3,axes);
mySlice->getOperator()->setDataType(DataType::Int32);
mySlice->getOperator()->setBackend("cpu");
mySlice->forward();
// op->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == *expectedOutput);
REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
}
SECTION("Attributes instead of inputs") {
std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
{
{
{
{ 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
{-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
},
{
{ 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
{-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
}
},
{
{
{ 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
{-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
},
{
{ 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
{-5, 4, 2,-3,11,-5,-6, 7,-1,10}
}
}
}
});
std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,1,1,1,5> {
{
{
{
{ 0, 1, 2,-3, 4}
}
}
}
});
std::shared_ptr<Node> mySlice = Slice({0,0,0,0}, {1,1,1,5}, {0,1,2,3}, {1,1,1,1});
auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
mySlice->getOperator()->associateInput(0,input0);
mySlice->getOperator()->setDataType(DataType::Int32);
mySlice->getOperator()->setBackend("cpu");
mySlice->forward();
// op->getOutput(0)->print();
REQUIRE(*(op->getOutput(0)) == *expectedOutput);
REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
}
}
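Assuming a standard Catch2 test runner (binary name hypothetical), these cases can be run in isolation by tag, e.g. ./unit_tests "[Slice][CPU]".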