Update operators implementation · 63ec8e67
Maxence Naud authored
- adapt to core changes: Operator no longer refers to Tensors directly
- remove assertions already performed in the abstract operator constructors
- fix missing 'template' keyword before the dependent template name 'dims'
SliceImpl.cpp
/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/
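
// CPU implementation of the Slice operator, specialized for 1-D to 4-D
// input tensors.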

#include <cassert>
#include <cstdio>     // std::printf
#include <functional> // std::multiplies
#include <numeric>    // std::accumulate
#include <tuple>
#include <vector>

#include "aidge/operator/Slice.hpp"

#include "aidge/backend/cpu/operator/SliceImpl.hpp"
#include "aidge/backend/cpu/operator/SliceImpl_forward_kernels.hpp"
#include "aidge/utils/Types.h"
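
// Scheduler bookkeeping contract implemented by every SliceImpl_cpu<DIM>
// specialization below:
//  - getNbRequiredData() / getRequiredMemory() return the input and output
//    element counts (Slice consumes its whole input tensor in one pass),
//  - getNbConsumedData() / getNbProducedData() expose running counters that
//    updateConsummerProducer() and forward() advance after each pass.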


Aidge::NbElts_t Aidge::SliceImpl_cpu<1>::getNbRequiredData(const Aidge::IOIndex_t /*inputIdx*/) const {
    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "requires valid input");

    // Requires the whole input tensor
    return std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<1>()[0];
}

Aidge::NbElts_t Aidge::SliceImpl_cpu<1>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { return 0; }

Aidge::NbElts_t Aidge::SliceImpl_cpu<1>::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                               const std::vector<Aidge::DimSize_t>& inputsSize) const {
    (void)outputIdx;
    (void)inputsSize;
    return std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->template dims<1>()[0];
}

Aidge::NbElts_t Aidge::SliceImpl_cpu<1>::getNbConsumedData(const Aidge::IOIndex_t /*inputIdx*/) const {
    return mNbConsumedData[0];
}

Aidge::NbElts_t Aidge::SliceImpl_cpu<1>::getNbProducedData(const Aidge::IOIndex_t /*outputIdx*/) const {
    return mNbProducedData[0];
}

void Aidge::SliceImpl_cpu<1>::updateConsummerProducer() {
    // each input is consumed by the minimum amount required for a forward pass
    mNbConsumedData[0] += getNbRequiredData(0);

    mNbProducedData[0] += getRequiredMemory(0, {});
}

void Aidge::SliceImpl_cpu<1>::forward() {
    // FIXME: uncomment the following code once memory handling works
    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");

    // Find the correct kernel type
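    // (Registrar::create() selects the kernel registered for the input's
    // DataType)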
    auto kernelFunc = Registrar<SliceImplForward_cpu<1>>::create(
            {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType()});

    // Call kernel
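    // (arguments: the Slice static attributes, the input dimensions, and the
    // raw input/output buffers)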
    kernelFunc(dynamic_cast<const Slice_Op<1>&>(mOp).getStaticAttributes(),
                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<1>(),
                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
                std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()
            );

    // each input is consumed by the minimum amount required for a forward pass
    mNbConsumedData[0] += getNbRequiredData(0);

    mNbProducedData[0] += getRequiredMemory(0, {});
}

void Aidge::SliceImpl_cpu<1>::backward() { std::printf("Not implemented yet.\n"); }
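
// Note: the Registrar lookup in forward() only succeeds if a kernel matching
// the input's DataType was registered beforehand (normally done in
// SliceImpl_forward_kernels.hpp). A minimal sketch of such a registration,
// with hypothetical registrar and kernel names for illustration:
//
//     static Registrar<SliceImplForward_cpu<1>> registrarSliceForward1D_f32(
//             {DataType::Float32},                    // dispatch key
//             SliceImpl1D_cpu_forward_kernel<float>); // kernel to call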

/////////////////////////////////////////////////////////////////////////

Aidge::NbElts_t Aidge::SliceImpl_cpu<2>::getNbRequiredData(const Aidge::IOIndex_t /*inputIdx*/) const {
    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "requires valid input");

    // Requires the whole input tensor
    const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<2>();
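    // element count = product of both dimensions (e.g. dims {3, 4} -> 12)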
    return inputDims[0]*inputDims[1];
}

Aidge::NbElts_t Aidge::SliceImpl_cpu<2>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { return 0; }

Aidge::NbElts_t Aidge::SliceImpl_cpu<2>::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                               const std::vector<Aidge::DimSize_t>& inputsSize) const {
    (void)outputIdx;
    (void)inputsSize;
    const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->template dims<2>();
    return outputDims[0]*outputDims[1];
}

Aidge::NbElts_t Aidge::SliceImpl_cpu<2>::getNbConsumedData(const Aidge::IOIndex_t /*inputIdx*/) const {
    return mNbConsumedData[0];
}

Aidge::NbElts_t Aidge::SliceImpl_cpu<2>::getNbProducedData(const Aidge::IOIndex_t /*outputIdx*/) const {
    return mNbProducedData[0];
}

void Aidge::SliceImpl_cpu<2>::updateConsummerProducer() {
    // each input is consumed by the minimum amount required for a forward pass
    mNbConsumedData[0] += getNbRequiredData(0);

    mNbProducedData[0] += getRequiredMemory(0, {});
}

void Aidge::SliceImpl_cpu<2>::forward() {
    // FIXME: uncomment the following code once memory handling works
    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");

    // Find the correct kernel type
    auto kernelFunc = Registrar<SliceImplForward_cpu<2>>::create(
            {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType()});

    // Call kernel
    kernelFunc(dynamic_cast<const Slice_Op<2>&>(mOp).getStaticAttributes(),
                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<2>(),
                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
                std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()
            );

    // each input is consumed by the minimum amount required for a forward pass
    mNbConsumedData[0] += getNbRequiredData(0);

    mNbProducedData[0] += getRequiredMemory(0, {});
}

void Aidge::SliceImpl_cpu<2>::backward() { std::printf("Not implemented yet.\n"); }

////////////////////////////////////////////////////////////////////////////

Aidge::NbElts_t Aidge::SliceImpl_cpu<3>::getNbRequiredData(const Aidge::IOIndex_t /*inputIdx*/) const {
    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "requires valid input");

    // Requires the whole input tensor
    const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<3>();
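    // element count = product of all dimensions (e.g. dims {2, 3, 4} -> 24)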

    return std::accumulate(inputDims.begin(), inputDims.end(), static_cast<NbElts_t>(1),
                            std::multiplies<NbElts_t>());
}

Aidge::NbElts_t Aidge::SliceImpl_cpu<3>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { return 0; }

Aidge::NbElts_t Aidge::SliceImpl_cpu<3>::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                               const std::vector<Aidge::DimSize_t>& inputsSize) const {
    (void)outputIdx;
    (void)inputsSize;
    const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->template dims<3>();
    return std::accumulate(outputDims.begin(), outputDims.end(), static_cast<NbElts_t>(1),
                            std::multiplies<NbElts_t>());
}

Aidge::NbElts_t Aidge::SliceImpl_cpu<3>::getNbConsumedData(const Aidge::IOIndex_t /*inputIdx*/) const {
    return mNbConsumedData[0];
}

Aidge::NbElts_t Aidge::SliceImpl_cpu<3>::getNbProducedData(const Aidge::IOIndex_t /*outputIdx*/) const {
    return mNbProducedData[0];
}

void Aidge::SliceImpl_cpu<3>::updateConsummerProducer() {
    // each input is consumed by the minimum amount required for a forward pass
    mNbConsumedData[0] += getNbRequiredData(0);

    mNbProducedData[0] += getRequiredMemory(0, {});
}

void Aidge::SliceImpl_cpu<3>::forward() {
    // FIXME: uncomment the following code once memory handling works
    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");

    // Find the correct kernel type
    auto kernelFunc = Registrar<SliceImplForward_cpu<3>>::create(
            {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType()});

    // Call kernel
    kernelFunc(dynamic_cast<const Slice_Op<3>&>(mOp).getStaticAttributes(),
                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<3>(),
                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
                std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()
            );

    // each input is consumed by the minimum amount required for a forward pass
    mNbConsumedData[0] += getNbRequiredData(0);

    mNbProducedData[0] += getRequiredMemory(0, {});
}

void Aidge::SliceImpl_cpu<3>::backward() { std::printf("Not implemented yet.\n"); }

//////////////////////////////////////////////////////////////////////////////

Aidge::NbElts_t Aidge::SliceImpl_cpu<4>::getNbRequiredData(const Aidge::IOIndex_t /*inputIdx*/) const {
    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "requires valid input");

    // Requires the whole input tensor
    const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>();

    return std::accumulate(inputDims.begin(), inputDims.end(), static_cast<NbElts_t>(1),
                            std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::SliceImpl_cpu<4>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const { return 0; }

Aidge::NbElts_t Aidge::SliceImpl_cpu<4>::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                               const std::vector<Aidge::DimSize_t>& inputsSize) const {
    (void)outputIdx;
    (void)inputsSize;
    const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->template dims<4>();
    return std::accumulate(outputDims.begin(), outputDims.end(), static_cast<NbElts_t>(1),
                            std::multiplies<NbElts_t>());
}

Aidge::NbElts_t Aidge::SliceImpl_cpu<4>::getNbConsumedData(const Aidge::IOIndex_t /*inputIdx*/) const {
    return mNbConsumedData[0];
}

Aidge::NbElts_t Aidge::SliceImpl_cpu<4>::getNbProducedData(const Aidge::IOIndex_t /*outputIdx*/) const {
    return mNbProducedData[0];
}

void Aidge::SliceImpl_cpu<4>::updateConsummerProducer() {
    // each input is consumed by the minimum amount required for a forward pass
    mNbConsumedData[0] += getNbRequiredData(0);

    mNbProducedData[0] += getRequiredMemory(0, {});
}

void Aidge::SliceImpl_cpu<4>::forward() {
    // FIXME: uncomment the following code once memory handling works
    assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");

    // Find the correct kernel type
    auto kernelFunc = Registrar<SliceImplForward_cpu<4>>::create(
            {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType()});

    // Call kernel
    kernelFunc(dynamic_cast<const Slice_Op<4>&>(mOp).getStaticAttributes(),
                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->template dims<4>(),
                std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->getImpl()->rawPtr(),
                std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr()
            );

    // each input is consumed by the minimum amount required for a forward pass
    mNbConsumedData[0] += getNbRequiredData(0);

    mNbProducedData[0] += getRequiredMemory(0, {});
}

void Aidge::SliceImpl_cpu<4>::backward() { std::printf("Not implemented yet.\n"); }