/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#include "aidge/operator/Slice.hpp"

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>  // std::multiplies
#include <numeric>     // std::accumulate
#include <utility>
#include <vector>

#include <fmt/format.h>

#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
// Default (backend-agnostic) Slice implementation: copies the selected
// hyper-rectangle of input #0 into output #0 as a sequence of contiguous chunks.
void Aidge::Slice_OpImpl::forward() {
    const Slice_Op& op = dynamic_cast<const Slice_Op&>(mOp);

    // All four inputs (data, starts, ends, axes) must be connected.
    for (std::size_t i = 0; i < 4; ++i) {
        if (!op.getInput(i)) {
            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", op.Type, i);
        }
    }
    AIDGE_ASSERT((op.getInput(1)->size() == op.getInput(2)->size()) && (op.getInput(1)->size() == op.getInput(3)->size()), "start, end and axes arguments should be the same size.");

    const std::size_t nbDims = op.getInput(0)->nbDims();

    // NOTE(review): buffers are read as int — assumes starts/ends/axes tensors
    // hold Int32 data; confirm against the operator's input contract.
    const int* starts = static_cast<const int*>(op.getInput(1)->getImpl()->rawPtr());
    const int* ends = static_cast<const int*>(op.getInput(2)->getImpl()->rawPtr());
    const int* axes = static_cast<const int*>(op.getInput(3)->getImpl()->rawPtr());

    const std::vector<std::size_t>& inputDims = op.getInput(0)->dims();
    auto outputDims = op.getInput(0)->dims();

    // Compute the flat index of the output's first element, and the output
    // dimensions at the same time (they may change between two forward calls).
    std::size_t beginning = 0;
    const std::size_t nbAxes = op.getInput(3)->size();

    for (std::size_t i = 0; i < nbAxes; ++i) {
        // For each sliced axis, fetch the params; negative values count from
        // the end of the axis (ONNX convention).
        const int axis_ = axes[i];
        const int start_ = starts[i];
        const int end_ = ends[i];
        const std::size_t axis = static_cast<std::size_t>(axis_ >= 0 ? axis_ : axis_ + static_cast<int>(inputDims.size()));
        const std::size_t start = start_ >= 0 ? start_ : start_ + inputDims[axis];
        const std::size_t end = end_ >= 0 ? end_ : end_ + inputDims[axis];
        // Number of elements spanned by one step along `axis`.
        const std::size_t stridePostAxis = std::accumulate(inputDims.cbegin()+axis+1, inputDims.cend(), std::size_t(1), std::multiplies<std::size_t>());
        beginning += start * stridePostAxis;
        const std::size_t sliceLength = end - start;
        outputDims[axis] = sliceLength;
    }
    // Resize once, after every axis has been processed.
    op.getOutput(0)->resize(outputDims);

    // Per-axis number of input elements skipped by the slice.
    // for inputDims = {4,5,5,3} & outputDims = {3,2,2,1}: substractedDims = {1,3,3,2}
    std::vector<std::size_t> substractedDims = std::vector<std::size_t>(nbDims);
    for (std::size_t i = 0; i < nbDims; ++i) {
        substractedDims[i] = inputDims[i] - outputDims[i];
    }

    // Suffix products of the dims (row-major strides, in element counts).
    // for outputDims = {3,2,2,1}: prodOutputDims = {12,4,2,1}
    std::vector<std::size_t> prodOutputDims = std::vector<std::size_t>(nbDims);
    std::vector<std::size_t> prodInputDims = std::vector<std::size_t>(nbDims + 1);
    prodOutputDims[nbDims - 1] = outputDims[nbDims - 1];
    prodInputDims[nbDims - 1] = inputDims[nbDims - 1];
    prodInputDims[nbDims] = 1;
    for (std::size_t i = 2; i <= nbDims; ++i) {
        prodOutputDims[nbDims - i] = prodOutputDims[nbDims - i + 1] * outputDims[nbDims - i];
        prodInputDims[nbDims - i] = prodInputDims[nbDims - i + 1] * inputDims[nbDims - i];
    }

    // Walk the output element count, accumulating contiguous input runs and
    // copying each run ("chunk") when a sliced axis boundary is crossed.
    std::size_t i = beginning;      // current flat index in the input
    std::size_t size = 0;           // number of elements in the current chunk
    std::size_t offset = 0;         // write position in the output
    for (std::size_t j = 0; j < prodOutputDims[0];) {
        ++size;
        ++i;
        ++j;
        bool newChunk = false;
        for (std::size_t idx = nbDims - 1; idx > 0; --idx) {
            if (j % prodOutputDims[idx] == 0) {
                // End of a (hyper-)row: skip the input elements the slice drops.
                i += substractedDims[idx] * prodInputDims[idx + 1];
                newChunk = true;
            }
        }

        if (newChunk) {
            op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(beginning), size, offset);
            beginning = i;
            offset += size;
            size = 0;
        }
    }

    // Flush the last pending chunk, if any.
    if (size > 0) {
        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(beginning), size, offset);
    }
}
// Registry/type identifier for the Slice operator.
const std::string Aidge::Slice_Op::Type = "Slice";

bool Aidge::Slice_Op::forwardDims(bool /*allowDataDependency*/) {
Houssem ROUIS's avatar
Houssem ROUIS committed
    // check inputs have been associated
    for(std::size_t i = 0; i < 4; ++i){
Houssem ROUIS's avatar
Houssem ROUIS committed
        if (!getInput(i)) {
            AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
        }
    if((!getInput(0)->empty()) && (!getInput(1)->empty()) && (!getInput(2)->empty()) && (!getInput(3)->empty()))
    {
        const void* starts = mInputs[1]->getImpl()->rawPtr();
        const void* ends = mInputs[2]->getImpl()->rawPtr();
        const void* axes = mInputs[3]->getImpl()->rawPtr();
        AIDGE_ASSERT((mInputs[1]->dataType() == mInputs[2]->dataType()) && (mInputs[1]->dataType() == mInputs[3]->dataType()), "Slice inputs must have the same dataType.");

        DimSize_t nbAxes = mInputs[1]->size();
        std::vector<DimSize_t> outDims = getInput(0)->dims();
        for (std::size_t i = 0; i < nbAxes; ++i) {
            // For each slice operation get the params and cast them to size_t
            DimSize_t start, end, axis = 0;
            switch (getInput(1)->dataType())
                case DataType::Float32: {
                    using ctype = cpptype_t<DataType::Float32>;
                    const ctype* starts_ = static_cast<const ctype*>(starts);
                    const ctype* ends_ = static_cast<const ctype*>(ends);
                    const ctype* axes_ = static_cast<const ctype*>(axes);
                    axis = axes_[i] >= 0 ? static_cast<DimSize_t>(axes_[i]) : static_cast<DimSize_t>(axes_[i] + static_cast<ctype>(getInput(0)->nbDims()));
                    start = starts_[i] >= 0 ? static_cast<DimSize_t>(starts_[i]) : static_cast<DimSize_t>(starts_[i] + static_cast<ctype>(getInput(0)->dims()[axis]));
                    end = ends_[i] >= 0 ? static_cast<DimSize_t>(ends_[i]) : static_cast<DimSize_t>(ends_[i] + static_cast<ctype>(getInput(0)->dims()[ends_[i]]));
                } break;

                case DataType::Int32: {
                    using ctype = cpptype_t<DataType::Int32>;
                    const ctype* starts_ = static_cast<const ctype*>(starts);
                    const ctype* ends_ = static_cast<const ctype*>(ends);
                    const ctype* axes_ = static_cast<const ctype*>(axes);
                    axis = axes_[i] >= 0 ? static_cast<DimSize_t>(axes_[i]) : static_cast<DimSize_t>(axes_[i] + static_cast<ctype>(getInput(0)->nbDims()));
                    start = starts_[i] >= 0 ? static_cast<DimSize_t>(starts_[i]) : static_cast<DimSize_t>(starts_[i] + static_cast<ctype>(getInput(0)->dims()[axis]));
                    end = ends_[i] >= 0 ? static_cast<DimSize_t>(ends_[i]) : static_cast<DimSize_t>(ends_[i] + static_cast<ctype>(getInput(0)->dims()[ends_[i]]));
                } break;

                case DataType::Int64: {
                    using ctype = cpptype_t<DataType::Int64>;
                    const ctype* starts_ = static_cast<const ctype*>(starts);
                    const ctype* ends_ = static_cast<const ctype*>(ends);
                    const ctype* axes_ = static_cast<const ctype*>(axes);
                    axis = axes_[i] >= 0 ? static_cast<DimSize_t>(axes_[i]) : static_cast<DimSize_t>(axes_[i] + static_cast<ctype>(getInput(0)->nbDims()));
                    start = starts_[i] >= 0 ? static_cast<DimSize_t>(starts_[i]) : static_cast<DimSize_t>(starts_[i] + static_cast<ctype>(getInput(0)->dims()[axis]));
                    end = ends_[i] >= 0 ? static_cast<DimSize_t>(ends_[i]) : static_cast<DimSize_t>(ends_[i] + static_cast<ctype>(getInput(0)->dims()[ends_[i]]));
                } break;

                case DataType::UInt64: {
                    using ctype = cpptype_t<DataType::UInt64>;
                    const ctype* starts_ = static_cast<const ctype*>(starts);
                    const ctype* ends_ = static_cast<const ctype*>(ends);
                    const ctype* axes_ = static_cast<const ctype*>(axes);
                    axis = static_cast<DimSize_t>(axes_[i]);
                    start = static_cast<DimSize_t>(starts_[i]);
                    end = static_cast<DimSize_t>(ends_[i]);
                } break;

                default:
                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Slice inputs type is not supported yet");
            }
            const std::size_t sliceLength = end - start;
            // Check if slice length is valid
            if (sliceLength > getInput(0)->dims()[axis])
            {
                AIDGE_THROW_OR_ABORT(std::runtime_error, "ROI of Slice operator out of bounds");
            }
            outDims[axis] = sliceLength;
        mOutputs[0]->resize(outDims);
Olivier BICHLER's avatar
Olivier BICHLER committed

void Aidge::Slice_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
    if (Registrar<Slice_Op>::exists({name})){
        SET_IMPL_MACRO(Slice_Op, *this, name);
    }
    else {
        mImpl = std::make_shared<Slice_OpImpl>(*this);
Olivier BICHLER's avatar
Olivier BICHLER committed
    mOutputs[0]->setBackend(name, device);
}