/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#include "aidge/backend/cpu/operator/FCImpl.hpp"

#include <cstddef>  // std::size_t
#include <functional>
#include <memory>
#include <tuple>

#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp"
#include "aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp"
#include "aidge/operator/FC.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
void Aidge::FCImpl_cpu::forward()
{
    const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
    AIDGE_ASSERT(op_.getInput(0), "missing input #0");
    AIDGE_ASSERT(op_.getInput(1), "missing input #1");
    AIDGE_ASSERT(op_.getInput(2), "missing input #2");
Cyril Moineau's avatar
Cyril Moineau committed

    // Find the correct kernel type
    const auto outputDataType = op_.getOutput(0)->dataType();
Olivier BICHLER's avatar
Olivier BICHLER committed
    const Registrar<FCImplForward_cpu>::registrar_key registrarKey = {
        op_.getInput(0)->dataType(),
        op_.getInput(1)->dataType(),
        op_.getInput(2)->dataType(),
Olivier BICHLER's avatar
Olivier BICHLER committed
        outputDataType};

    Registrar<FCImplForward_cpu>::registrar_type kernelFunc;
    if (Registrar<FCImplForward_cpu>::exists(registrarKey)) {
        // One exists with the right inputs/output types
        kernelFunc = Registrar<FCImplForward_cpu>::create(registrarKey);
    }
    else {
        // Otherwise, fallback to the kernel with all types matching output type
        kernelFunc = Registrar<FCImplForward_cpu>::create({
            outputDataType, outputDataType, outputDataType, outputDataType});
    }

    // Convert input data (no overhead if not needed!)
    // TODO: right now, if needed, memory will be allocated/deallocated at each
    // call to forward(). We might put the following shared_ptr as members of
    // this class to avoid that.
    std::shared_ptr<Tensor> input0Fallback, input1Fallback, input2Fallback;
    const auto& input0 = op_.getInput(0)->refCastFrom(input0Fallback, *(op_.getOutput(0)));
    const auto& input1 = op_.getInput(1)->refCastFrom(input1Fallback, *(op_.getOutput(0)));
    const auto& input2 = op_.getInput(2)->refCastFrom(input2Fallback, *(op_.getOutput(0)));
Cyril Moineau's avatar
Cyril Moineau committed

    // Call kernel
    const auto batchSize = (input0.dims().size() > 1) ? input0.dims()[0] : 1;
Olivier BICHLER's avatar
Olivier BICHLER committed
    kernelFunc(dynamic_cast<const FC_Op&>(mOp).getStaticAttributes(),
        batchSize,
        input0.size() / batchSize,
Olivier BICHLER's avatar
Olivier BICHLER committed
        input0.getImpl()->rawPtr(), input1.getImpl()->rawPtr(), input2.getImpl()->rawPtr(),
Cyril Moineau's avatar
Cyril Moineau committed
}
void Aidge::FCImpl_cpu::backward()
{
    const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
    const auto& fc_grad = op_.getOutput(0)->grad();
    assert(fc_grad && "missing ouput #0 gradient");
    // Find the correct kernel type
    const Registrar<FCImplBackward_cpu>::registrar_key registrarKey = {
        fc_grad->dataType(),
        op_.getInput(0)->grad()->dataType(),
        op_.getInput(1)->grad()->dataType(),
        op_.getInput(2)->grad()->dataType()};
    Registrar<FCImplBackward_cpu>::registrar_type kernelFunc;
    if (Registrar<FCImplBackward_cpu>::exists(registrarKey)) {
        // One exists with the right inputs/output types
        kernelFunc = Registrar<FCImplBackward_cpu>::create(registrarKey);
    }
    else {
        // Otherwise, fallback to the kernel with all types matching output type
        kernelFunc = Registrar<FCImplBackward_cpu>::create({
            fc_grad->dataType(), fc_grad->dataType(), fc_grad->dataType(), fc_grad->dataType()});
    }
    // Convert input data (no overhead if not needed!)
    // TODO: right now, if needed, memory will be allocated/deallocated at each
    // call to forward(). We might put the following shared_ptr as members of
    // this class to avoid that.
    std::shared_ptr<Tensor> input0gradFallback, input1gradFallback, input2gradFallback;
    const auto& input0grad = op_.getInput(0)->grad()->refCastFrom(input0gradFallback, *(op_.getOutput(0)));
    const auto& input1grad = op_.getInput(1)->grad()->refCastFrom(input1gradFallback, *(op_.getOutput(0)));
    const auto& input2grad = op_.getInput(2)->grad()->refCastFrom(input2gradFallback, *(op_.getOutput(0)));
    // Call kernel
    const auto batchSize = (input0grad.dims().size() > 1) ? input0grad.dims()[0] : 1;
    kernelFunc(dynamic_cast<const FC_Op&>(mOp).getStaticAttributes(),
        batchSize,
        input0grad.size() / batchSize,
        getCPUPtr(fc_grad),
        getCPUPtr(op_.getInput(0)),
        getCPUPtr(mOp.getRawInput(1)),
        input0grad.getImpl()->rawPtr(),
        input1grad.getImpl()->rawPtr(),
        input2grad.getImpl()->rawPtr());
}