/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#ifndef AIDGE_CORE_OPERATOR_FC_H_
#define AIDGE_CORE_OPERATOR_FC_H_

#include <array>
#include <cassert>
#include <cmath>
#include <cstring>
#include <memory>
#include <numeric>
#include <string>
#include <vector>

#include "aidge/utils/Types.h"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"

namespace Aidge {
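/**
 * @brief Attributes of the FC operator: the number of output channels and
 * whether the bias input is disabled.
 */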
enum class FCAttr { OutChannels, NoBias };

class FC_Op : public Operator,
              public Registrable<FC_Op,
                                 std::string,
                                 std::unique_ptr<OperatorImpl>(const FC_Op &)>,
              public StaticAttributes<FCAttr, DimSize_t, bool> {
public:
    // FIXME: change accessibility
    std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(), std::make_shared<Tensor>()};
    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();

public:
    static constexpr const char* Type = "FC";

    FC_Op() = delete;

    using Attributes_ = StaticAttributes<FCAttr, DimSize_t, bool>;
    template <FCAttr e> using attr = typename Attributes_::template attr<e>;

    FC_Op(DimSize_t out_channels, bool noBias)
            : Operator(Type),
            Attributes_(
                attr<FCAttr::OutChannels>(out_channels),
                attr<FCAttr::NoBias>(noBias))
    {
        setDatatype(DataType::Float32);
    }

    /**
     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
     * @param op Operator to copy.
     */
    FC_Op(const FC_Op& op)
        : Operator(Type),
          Attributes_(op),
          mOutput(std::make_shared<Tensor>(*op.mOutput))
    {
        // cpy-ctor
        setDatatype(op.mOutput->dataType());
        mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
    }

    /**
     * @brief Clone the operator using its copy-constructor.
     * @see Operator::FC_Op
     */
    std::shared_ptr<Operator> clone() const override {
        return std::make_shared<FC_Op>(*this);
    }

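    /**
     * @brief Associate a Tensor with one of the three inputs (data, weight, bias).
     * A 1-D data input is reshaped to <1, size>; the bias input (index 2) must be
     * 1-D with OutChannels elements, or 0 elements when NoBias is set.
     */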
    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
        assert(inputIdx < 3 && "operators supports only 3 inputs");
        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
        if (inputIdx == 2) {
            const std::size_t expectedBiasSize = (this->template getAttr<FCAttr::NoBias>())
                                                     ? 0
                                                     : static_cast<std::size_t>(this->template getAttr<FCAttr::OutChannels>());
            assert(std::dynamic_pointer_cast<Tensor>(data)->size() == expectedBiasSize && "bias size must be OutChannels, or 0 when NoBias is set");
            assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1 && "bias must be a 1-D Tensor");
        }
        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
        if (inputIdx == 0 && mInputs[0]->nbDims() == 1)
            mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, mInputs[inputIdx]->size()}));
    }

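    /**
     * @brief Compute the output dimensions from the first input: the weight Tensor
     * is resized to <OutChannels, in_features> and the output to <batch, OutChannels>.
     */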
    void computeOutputDims() override final {
        if (!mInputs[0]->empty()) {
            // weight is <OutChannels, in_features>
            std::array<DimSize_t, 2> weightDims = {this->template getAttr<FCAttr::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
            // output is <batch, OutChannels>
            std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template getAttr<FCAttr::OutChannels>()};

            mInputs[1]->resize(weightDims);
            mOutput->resize(outputDims);
        }
    }

    bool outputDimsForwarded() const override final {
        return !(mOutput->empty());
    }


    inline Tensor& input(const IOIndex_t inputIdx) const override final {
        assert(inputIdx < 3 && "operator supports only 3 inputs");
        return *(mInputs[inputIdx].get());
    }
    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }


    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
        assert(inputIdx < 3 && "FC Operators supports only 3 inputs");
        return mInputs[inputIdx];
    }
    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
        assert((outputIdx == 0) && "FC Operator has only 1 output");
        (void) outputIdx; // avoid unused warning
        return mOutput;
    }


    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
        assert(inputIdx < 3 && "operators supports only 3 inputs");
        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
    }
    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "operator supports only 1 output");
        (void) outputIdx; // avoid unused warning
        return std::static_pointer_cast<Data>(mOutput);
    }


    void setBackend(const std::string& name) override {
        mImpl = Registrar<FC_Op>::create(name)(*this);
        mOutput->setBackend(name);

        // FIXME: temporary workaround
        mInputs[0]->setBackend(name);
        mInputs[1]->setBackend(name);
        mInputs[2]->setBackend(name);
    }

    void setDatatype(const DataType& datatype) override {
        mOutput->setDatatype(datatype);

        // FIXME: temporary workaround
        mInputs[0]->setDatatype(datatype);
        mInputs[1]->setDatatype(datatype);
        mInputs[2]->setDatatype(datatype);
    }


    inline IOIndex_t nbInputs() const noexcept override final { return 3; }
    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
    static const std::vector<std::string> getInputsName(){
        return {"data_input", "weight", "bias"};
    }
    static const std::vector<std::string> getOutputsName(){
        return {"data_output"};
    }
};

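/**
 * @brief Helper building a fully-connected (FC) Node together with its weight and
 * bias Producer nodes.
 * @param out_channels Number of output channels of the layer.
 * @param noBias If true, the bias Producer is created with a size of 0.
 * @param name Optional name for the Node.
 *
 * Minimal usage sketch (illustrative only; the channel count and name are arbitrary):
 * @code
 * auto fc1 = Aidge::FC(128, false, "fc1");
 * @endcode
 */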
inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "") {
    // FIXME: properly handle default w&b initialization in every case
    auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name);
    addProducer(fc, 1, std::array<DimSize_t, 2>({out_channels, 1}), "w");
    addProducer(fc, 2, (noBias ? std::array<DimSize_t, 1>({0}) : std::array<DimSize_t, 1>({out_channels})), "b"); // already sets bias dims
    return fc;
}
} // namespace Aidge

namespace {
template <>
const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
                                                        "NoBias"};
}

#endif /* AIDGE_CORE_OPERATOR_FC_H_ */