diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 01dc85504df81bff342ad69702cbc2164e44ab4c..f3afa67a7614b1e525012fca4f4ecead4546846a 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -24,11 +24,11 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-class Slice_OpImpl : public OperatorImpl {
-public:
-    Slice_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-    void forward() override;
-};
+// class Slice_OpImpl : public OperatorImpl {
+// public:
+//     Slice_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+//     void forward() override;
+// };
 
 enum class SliceAttr { Starts, Ends, Axes, Steps };
 
@@ -50,9 +50,7 @@ public:
                     attr<SliceAttr::Ends>(ends),
                     attr<SliceAttr::Axes>(axes),
                     attr<SliceAttr::Steps>(steps))
-    {
-        mImpl = std::make_shared<Slice_OpImpl>(*this);
-    }
+    {}
 
 
     /**
@@ -68,7 +66,7 @@ public:
             SET_IMPL_MACRO(Slice_Op, *this, op.backend());
         }
         else {
-            mImpl = std::make_shared<Slice_OpImpl>(*this);
+            mImpl = nullptr;
         }
     }
 
@@ -111,21 +109,21 @@ template <>
 const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "Starts", "Ends", "Axes", "Steps" };
 }
-namespace Aidge {
-    class SliceImplForward
-        : public Registrable<SliceImplForward,
-                             std::tuple<DataType>,
-                             void(const Slice_Op::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
-    template <typename I>
-    void Slice_forward_kernel(const Slice_Op::Attrs &attrs, const std::vector<DimSize_t>&inputDims, const void *input_, void *output_);
-
-namespace {
-static Registrar<SliceImplForward> registrarSliceImplForward_Float32(
-        {DataType::Float32}, Slice_forward_kernel<float>);
-static Registrar<SliceImplForward> registrarSliceImplForward_Int32(
-        {DataType::Int32}, Slice_forward_kernel<int>);
-static Registrar<SliceImplForward> registrarSliceImplForward_Int64(
-        {DataType::Float64}, Slice_forward_kernel<double>);
-}
-}
+// namespace Aidge {
+//     class SliceImplForward
+//         : public Registrable<SliceImplForward,
+//                              std::tuple<DataType>,
+//                              void(const Slice_Op::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+//     template <typename I>
+//     void Slice_forward_kernel(const Slice_Op::Attrs &attrs, const std::vector<DimSize_t>&inputDims, const void *input_, void *output_);
+
+// namespace {
+// static Registrar<SliceImplForward> registrarSliceImplForward_Float32(
+//         {DataType::Float32}, Slice_forward_kernel<float>);
+// static Registrar<SliceImplForward> registrarSliceImplForward_Int32(
+//         {DataType::Int32}, Slice_forward_kernel<int>);
+// static Registrar<SliceImplForward> registrarSliceImplForward_Int64(
+//         {DataType::Float64}, Slice_forward_kernel<double>);
+// }
+// }
 
 #endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 166442bd8bcdd33d4d505d1ecbeef411a4bd1f28..0a486e37a36b02c0a8252036ee34ca805bae725c 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -26,92 +26,6 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/data/Data.hpp"
-#include "aidge/utils/Registrar.hpp"
-
-template<class I>
-void Aidge::Slice_forward_kernel(const Slice_Op::Attrs &attrs, const std::vector<DimSize_t>&inputDims, const void *input_, void *output_){
-    const I* input = static_cast<const I*>(input_);
-    I* output = static_cast<I*>(output_);
-
-    const std::size_t nbDims = inputDims.size();
-    std::vector<DimSize_t> dims = inputDims;
-    DimSize_t totalSize = std::accumulate(inputDims.cbegin(), inputDims.cend(), std::size_t(1), std::multiplies<std::size_t>());
-    I* outputAccumulation = new I[totalSize];
-    const I* inputAccumulation = input;
-    const std::size_t nbAxes = std::get<0>(attrs).size();
-    for (std::size_t i = 0; i < nbAxes; ++i) {
-        DimIdx_t axis = std::get<2>(attrs)[i] >= 0 ?
-                        static_cast<DimIdx_t>(std::get<2>(attrs)[i]) :
-                        static_cast<DimIdx_t>(std::get<2>(attrs)[i] + static_cast<DimIdx_t>(inputDims.size()));
-        std::int64_t start = std::get<0>(attrs)[i] >= 0 ?
-                             std::get<0>(attrs)[i] :
-                             std::get<0>(attrs)[i] + static_cast<std::int64_t>(inputDims[axis]);
-        std::int64_t end = std::get<1>(attrs)[i] >= 0 ?
-                           std::get<1>(attrs)[i] :
-                           std::get<1>(attrs)[i] + static_cast<std::int64_t>(inputDims[axis]);
-        std::int64_t step = std::get<3>(attrs)[i];
-
-        std::size_t sliceSize = static_cast<std::size_t>((end - start) / std::abs(step));
-
-        if ( i > 0) {
-            outputAccumulation = new I[totalSize];
-        }
-        const std::size_t stride_pre = std::accumulate(dims.cbegin(), dims.cbegin() + axis, 1, std::multiplies<std::size_t>());
-        const std::size_t stride_post = std::accumulate(dims.crbegin(), dims.crbegin() + nbDims -1 - axis, 1, std::multiplies<std::size_t>());
-        std::int64_t firstElem = step > 0 ? start : end;
-        std::int64_t lastElem = step > 0 ? end : start;
-
-        for (std::size_t outer = 0; outer < stride_pre; outer++)
-        {
-            std::size_t addedSlices = 0;
-            for (std::int64_t inner = firstElem; inner < lastElem; inner+=step)
-            {
-                size_t idx = outer * stride_post * dims[axis] + inner * stride_post;
-                size_t idx_out = outer * stride_post * sliceSize + addedSlices * stride_post;
-                if (idx < totalSize) {
-                    std::copy_n(std::next(inputAccumulation, idx), stride_post, std::next(outputAccumulation, idx_out));
-                }
-                addedSlices++;
-            }
-        }
-        totalSize /= dims[axis];
-        totalSize *= sliceSize;
-        dims[axis] = sliceSize;
-
-        if (inputAccumulation != input) {
-            delete[] inputAccumulation;
-        }
-        inputAccumulation = outputAccumulation;
-
-    }
-    // Copy elements from inputAccumulation to output while dividing by divisor
-    std::copy_n(inputAccumulation, totalSize, output);
-    // op.getOutput(0)->getImpl()->copy(inputAccumulation, totalSize);
-    if (outputAccumulation) {
-        delete[] outputAccumulation;
-    }
-}
-
-void Aidge::Slice_OpImpl::forward() {
-    const Slice_Op& op = dynamic_cast<const Slice_Op&>(mOp);
-
-    if (!op.getInput(0)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", op.Type);
-    }
-    AIDGE_ASSERT((op.template getAttr<SliceAttr::Starts>().size() == op.template getAttr<SliceAttr::Ends>().size()) &&
-                 (op.template getAttr<SliceAttr::Starts>().size() == op.template getAttr<SliceAttr::Axes>().size()),
-                 "start, end and axes arguments should be the same size.");
-    // Find the correct kernel type
-    auto kernelFunc =
-            Registrar<SliceImplForward>::create({std::static_pointer_cast<Tensor>(op.getRawInput(0))->dataType()});
-
-    // Call kernel
-    kernelFunc(dynamic_cast<const Slice_Op&>(mOp).getStaticAttributes(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
-               std::static_pointer_cast<Tensor>(op.getInput(0))->getImpl()->hostPtr(),
-               std::static_pointer_cast<Tensor>(op.getOutput(0))->getImpl()->hostPtr());
-}
 
 const std::string Aidge::Slice_Op::Type = "Slice";
 
@@ -254,11 +168,6 @@ bool Aidge::Slice_Op::forwardDims(bool /*allowDataDependency*/) {
 }
 
 void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-    if (Registrar<Slice_Op>::exists({name})){
-        SET_IMPL_MACRO(Slice_Op, *this, name);
-    }
-    else {
-        mImpl = std::make_shared<Slice_OpImpl>(*this);
-    }
+    SET_IMPL_MACRO(Slice_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
diff --git a/src/recipes/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp
index e0ce58d315f56587f41da6a3b1913c968c289825..342a4afe6c464898053d5046679c7b05c67552e0 100644
--- a/src/recipes/HorizontalTiling.cpp
+++ b/src/recipes/HorizontalTiling.cpp
@@ -107,8 +107,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         std::iota(usedDims.begin(), usedDims.end(), static_cast<std::int8_t>(0));
 
         // Create Slice's Steps attribute
-        std::vector<std::int64_t> steps(inputDimsEnd.size());
-        std::iota(steps.begin(), steps.end(), static_cast<std::int64_t>(1));
+        std::vector<std::int64_t> steps(inputDimsEnd.size(), static_cast<std::int64_t>(1));
         auto slice = Slice(inputDimsStart, inputDimsEnd, usedDims, steps, "Slice_" + std::to_string(currentFirstDims[axis]));
         slice -> addChild(newNode, 0, 0);
 
diff --git a/unit_tests/operator/Test_SliceImpl.cpp b/unit_tests/operator/Test_SliceImpl.cpp
deleted file mode 100644
index a9a20c3fd74137419115084204c7443fa4813124..0000000000000000000000000000000000000000
--- a/unit_tests/operator/Test_SliceImpl.cpp
+++ /dev/null
@@ -1,231 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <catch2/catch_test_macros.hpp>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/operator/Slice.hpp"
-
-using namespace Aidge;
-
-TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
-    SECTION("1D Tensor") {
-        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
-            {0, 1, -2,-3, 4,-5,-6, 7, 8, 9}
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int,3> {
-            {0, 1, -2}
-        });
-        std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,1>{{0}});
-        std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,1>{{3}});
-        std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,1>{{0}});
-
-        std::shared_ptr<Node> mySlice = Slice();
-        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
-        mySlice->getOperator()->associateInput(0,input0);
-        mySlice->getOperator()->associateInput(1,starts);
-        mySlice->getOperator()->associateInput(2,ends);
-        mySlice->getOperator()->associateInput(3,axes);
-        mySlice->getOperator()->setDataType(DataType::Int32);
-        mySlice->getOperator()->setBackend("cpu");
-        mySlice->forward();
-
-        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
-        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
-        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
-    }
-
-    SECTION("2D Tensor") {
-        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array2D<int,2,10> {
-            {
-                { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<int,2,3> {
-            {
-                {-5,-6, 7},
-                {-5,-6, 7}
-            }
-        });
-        std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,2>{{0,5}});
-        std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,2>{{2,8}});
-        std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,2>{{0,1}});
-
-        std::shared_ptr<Node> mySlice = Slice();
-        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
-        mySlice->getOperator()->associateInput(0,input0);
-        mySlice->getOperator()->associateInput(1,starts);
-        mySlice->getOperator()->associateInput(2,ends);
-        mySlice->getOperator()->associateInput(3,axes);
-        mySlice->getOperator()->setDataType(DataType::Int32);
-        mySlice->getOperator()->setBackend("cpu");
-        mySlice->forward();
-        op->getOutput(0)->print();
-        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
-        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
-        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
-    }
-
-    SECTION("3D Tensor") {
-        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array3D<int,2,2,10> {
-            {
-                {
-                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                },
-                {
-                    { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                    {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                }
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<int,1,1,3> {
-            {
-                {
-                    { 4,-5,-6}
-                }
-            }
-        });
-        std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,3>{{0,1,4}});
-        std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,3>{{1,2,7}});
-        std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,3>{{0,1,2}});
-
-        std::shared_ptr<Node> mySlice = Slice();
-        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
-        mySlice->getOperator()->associateInput(0,input0);
-        mySlice->getOperator()->associateInput(1,starts);
-        mySlice->getOperator()->associateInput(2,ends);
-        mySlice->getOperator()->associateInput(3,axes);
-        mySlice->getOperator()->setDataType(DataType::Int32);
-        mySlice->getOperator()->setBackend("cpu");
-        mySlice->forward();
-        // mySlice->getOperator()->output(0).print();
-        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
-        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
-        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
-    }
-
-    SECTION("4D Tensor") {
-        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
-            {
-                {
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    },
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    }
-                },
-                {
-                    {
-                        { 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    },
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3,11,-5,-6, 7,-1,10}
-                    }
-                }
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
-            {
-                {
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    },
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    }
-                },
-                {
-                    {
-                        { 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    },
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3,11,-5,-6, 7,-1,10}
-                    }
-                }
-            }
-        });
-        std::shared_ptr<Tensor> starts = std::make_shared<Tensor>(Array1D<int,4>{{0,0,0,0}});
-        std::shared_ptr<Tensor> ends = std::make_shared<Tensor>(Array1D<int,4>{{2,2,2,10}});
-        std::shared_ptr<Tensor> axes = std::make_shared<Tensor>(Array1D<int,4>{{0,1,2,3}});
-
-        std::shared_ptr<Node> mySlice = Slice();
-        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
-        mySlice->getOperator()->associateInput(0,input0);
-        mySlice->getOperator()->associateInput(1,starts);
-        mySlice->getOperator()->associateInput(2,ends);
-        mySlice->getOperator()->associateInput(3,axes);
-        mySlice->getOperator()->setDataType(DataType::Int32);
-        mySlice->getOperator()->setBackend("cpu");
-        mySlice->forward();
-        // op->getOutput(0)->print();
-        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
-        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
-        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
-    }
-
-    SECTION("Attributes instead of inputs") {
-        std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array4D<int,2,2,2,10> {
-            {
-                {
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    },
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    }
-                },
-                {
-                    {
-                        { 0, 1, 2,-3, 6,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3, 4,-5,-6, 7,-1,10}
-                    },
-                    {
-                        { 0, 1, 2,-3, 4,-5,-6, 7, 8, 9},
-                        {-5, 4, 2,-3,11,-5,-6, 7,-1,10}
-                    }
-                }
-            }
-        });
-        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<int,1,1,1,5> {
-            {
-                {
-                    {
-                        { 0, 1, 2,-3, 4}
-                    }
-                }
-            }
-        });
-
-        std::shared_ptr<Node> mySlice = Slice({0,0,0,0}, {1,1,1,5}, {0,1,2,3}, {1,1,1,1});
-        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
-        mySlice->getOperator()->associateInput(0,input0);
-        mySlice->getOperator()->setDataType(DataType::Int32);
-        mySlice->getOperator()->setBackend("cpu");
-        mySlice->forward();
-        // op->getOutput(0)->print();
-        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
-        REQUIRE(op->getOutput(0)->dims() == expectedOutput->dims());
-        REQUIRE(op->getOutput(0)->dataType() == expectedOutput->dataType());
-    }
-}