/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#include <cassert>
#include <chrono>     // std::chrono::milliseconds
#include <cstdio>     // printf
#include <functional> // std::multiplies
#include <numeric>    // std::accumulate
#include <thread>     // std::this_thread::sleep_for
#include <vector>

#include "aidge/utils/Types.h"
#include "aidge/operator/Conv.hpp"

#include "aidge/backend/cpu/operator/PadImpl.hpp"
#include "aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp"

Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
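    // Amount of input data required before this operator can run.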
    assert(inputIdx == 0 && "operator has only one input");
    (void) inputIdx;

    // Requires the whole tensors
    const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims();

    return std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
}

Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbRequiredProtected(IOIndex_t inputIdx) const {
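    // Amount of input data that must be preserved (not overwritten) while the
    // output is being produced.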
    assert(inputIdx == 0 && "operator has only one input");
    (void) inputIdx;

    // Padding cannot be done in-place!
    // We must ensure that we do not overwrite data that has not been consumed yet.
    const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims();
    const size_t inputSize = std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
    const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
    const size_t outputSize = std::accumulate(outputDims.begin(), outputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());

    return (outputSize - inputSize);
}

Aidge::NbElts_t Aidge::PadImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
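    // The 'inputsSize' hint is unused: the required memory is simply the full
    // size of the output tensor.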
    // Requires the whole tensors, regardless of available data on inputs
    assert(outputIdx == 0 && "operator has only one output");
    (void) outputIdx;

    const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
    return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
}

Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
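    // Total amount of data consumed so far on this input (updated by updateConsummerProducer()).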
    assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
}

Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
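    // Total amount of data produced so far on this output (updated by updateConsummerProducer()).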
    assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
}

void Aidge::PadImpl2D_cpu::updateConsummerProducer() {
    // Update producer-consumer data
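    // Advance the consumption/production counters by the amounts handled in one forward pass.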
    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx) {
        // Each input is consumed by the minimum amount required for a forward pass
        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));
    }

    mNbProducedData[0] += getRequiredMemory(0, {});
}

void Aidge::PadImpl2D_cpu::forward() {
    // FIXME: uncomment the following code once memory handling works
    assert(mOp.getInput(0) && "missing input #0");

    // Find the correct kernel type
    auto kernelFunc =
            Registrar<PadImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()});

    // Call kernel
    kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
               mOp.getInput(0)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
}

void Aidge::PadImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }