// SoftmaxImpl.cpp — CPU backend implementation of the Softmax operator (Aidge)
/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#include <cassert>
#include <numeric>
#include <chrono>
#include <thread>

#include "operator/Softmax.hpp"

#include "operator/SoftmaxImpl.hpp"
#include "operator/SoftmaxImpl_forward_kernels.hpp"
#include "utils/Types.h"
#include <numeric>
#include <vector>

// FIXME: replace whole Tensor with minimum needed data quantity
Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*inputIdx*/) const {
    assert(mOp.getInput(0) && "requires valid input");

    // Softmax consumes the whole input tensor at once, so the required
    // amount of data is the product of every input dimension.
    const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims();

    NbElts_t nbElts = 1;
    for (const auto dim : inputDims) {
        nbElts *= static_cast<NbElts_t>(dim);
    }
    return nbElts;
}

Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
    // No input data needs to be protected: the operator can run in-place,
    // overwriting its input with its output.
    // NOTE(review): the previous comment mentioned "direct convolution";
    // it appears copy-pasted from the Conv implementation.
    return 0;
}

Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
    // The whole output tensor must be materialized: multiply all of its
    // dimensions together to get the element count.
    const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();

    NbElts_t required = 1;
    for (const auto dim : outputDims) {
        required *= static_cast<NbElts_t>(dim);
    }
    return required;
}

Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*inputIdx*/) const {
    // A single counter is kept (slot 0), whatever input index is queried.
    const auto consumed = mNbConsumedData[0];
    return consumed;
}

Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
    // A single counter is kept (slot 0), whatever output index is queried.
    const auto produced = mNbProducedData[0];
    return produced;
}

void Aidge::SoftmaxImpl_cpu::forward() {
    // FIXME: uncomment the following code once memory handling will work
    assert(mOp.getInput(0) && "missing input #0");
    assert(mOp.getInput(0)->nbDims()>1);

    // Find the correct kernel type
    auto kernelFunc = Registrar<SoftmaxImplForward_cpu>::create({
        mOp.getInput(0)->dataType(),
        mOp.getOutput(0)->dataType()});

    DimSize_t batchSize = mOp.getInput(0)->dims()[0];
    DimSize_t channelSize = mOp.getInput(0)->dims()[1];
    DimSize_t featureSize = mOp.getInput(0)->sizeM1()/channelSize;
    // Call kernel
    kernelFunc(batchSize,
               channelSize,
               featureSize,
               mOp.getInput(0)->getImpl()->rawPtr(),
               mOp.getOutput(0)->getImpl()->rawPtr());


    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass

    mNbProducedData[0]+= getRequiredMemory(0, {});
}

// Backward (gradient) pass — not implemented for the CPU backend yet;
// calling it only emits a diagnostic message and returns.
void Aidge::SoftmaxImpl_cpu::backward() {
    printf("Not implemented yet.\n");
}