/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_FC_H_
#define AIDGE_CORE_OPERATOR_FC_H_
#include <array>
#include <cassert>
#include <cmath>
#include <cstring>
#include <memory>
#include <numeric>
#include <vector>

#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/Parameter.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
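// Parameters of the FC operator: the number of output channels, and a flag
// that disables the bias input (see the bias checks in FC_Op::associateInput).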
enum class FCParam { OutChannels, NoBias };
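// Fully Connected (FC) operator: applies a linear transform of the flattened
// data input by the weight matrix, plus an optional bias. Inputs are data (#0),
// weight (#1) and bias (#2); there is a single output.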
class FC_Op : public Operator,
public Registrable<FC_Op,
std::string,
std::unique_ptr<OperatorImpl>(const FC_Op &)>,
public Parameterizable<FCParam, DimSize_t, bool> {
public:
// FIXME: change accessibility
std::array<std::shared_ptr<Tensor>, 3> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>(), std::make_shared<Tensor>()};
const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
public:
static constexpr const char* Type = "FC";
FC_Op() = delete;
Operator* clone() const override {
    return new FC_Op(*this);
}
using Parameterizable_ = Parameterizable<FCParam, DimSize_t, bool>;
template <FCParam e> using param = typename Parameterizable_::template param<e>;
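// out_channels: number of output channels (first dimension of the weight tensor).
// noBias: when true, the bias input is expected to be an empty (size 0) tensor.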
FC_Op(DimSize_t out_channels, bool noBias)
: Operator(Type),
Parameterizable_(
param<FCParam::OutChannels>(out_channels),
param<FCParam::NoBias>(noBias)),
mOutput(std::make_shared<Tensor>())
{
setDatatype(DataType::Float32);
}
void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
    assert(inputIdx < 3 && "FC operator supports only 3 inputs");
    assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
    if (inputIdx == 2) {
        // Bias (input #2) must be a 1-D tensor of OutChannels elements, or empty when NoBias is set.
        const std::size_t biasSize = (this->template get<FCParam::NoBias>())
                ? std::size_t(0)
                : static_cast<std::size_t>(this->template get<FCParam::OutChannels>());
        assert(std::dynamic_pointer_cast<Tensor>(data)->size() == biasSize);
        assert(std::dynamic_pointer_cast<Tensor>(data)->nbDims() == 1);
    }
    mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
    // A 1-D data input is viewed as a <1, size> matrix (single-element batch).
    if (inputIdx == 0 && mInputs[0]->nbDims() == 1)
        mInputs[inputIdx]->resize(std::array<DimSize_t, 2>({1, mInputs[inputIdx]->size()}));
}
void computeOutputDims() override final {
if (!mInputs[0]->empty()) {
// weight dims: <out_channels, in_features>, with in_features taken from the
// input's non-batch size (sizeM1())
std::array<DimSize_t, 2> weightDims = {this->template get<FCParam::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
// output dims: <batch, out_channels>
std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template get<FCParam::OutChannels>()};
mInputs[1]->resize(weightDims);
mOutput->resize(outputDims);
}
}
bool outputDimsForwarded() const override final {
return !(mOutput->empty());
}
inline Tensor& input(const IOIndex_t inputIdx) const override final {
    assert(inputIdx < 3 && "FC operator supports only 3 inputs");
    return *(mInputs[inputIdx].get());
}
inline Tensor& output(const IOIndex_t /*inputIdx*/) const override final { return *(mOutput.get()); }
inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 3 && "FC operator supports only 3 inputs");
return mInputs[inputIdx];
}
inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
assert((outputIdx == 0) && "FC Operator has only 1 output");
(void) outputIdx; // avoid unused warning
return mOutput;
}
std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 3 && "FC operator supports only 3 inputs");
return std::static_pointer_cast<Data>(mInputs[inputIdx]);
}
std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
assert(outputIdx == 0 && "operator supports only 1 output");
(void) outputIdx; // avoid unused warning
return std::static_pointer_cast<Data>(mOutput);
}
void setBackend(const std::string& name) {
mImpl = Registrar<FC_Op>::create(name)(*this);
mOutput->setBackend(name);
// FIXME: temporary workaround
mInputs[0]->setBackend(name);
mInputs[1]->setBackend(name);
mInputs[2]->setBackend(name);
}
void setDatatype(const DataType& datatype) {
mOutput->setDatatype(datatype);
// FIXME: temporary workaround
mInputs[0]->setDatatype(datatype);
mInputs[1]->setDatatype(datatype);
mInputs[2]->setDatatype(datatype);
}
inline IOIndex_t nbInputs() const noexcept override final { return 3; }
inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};
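// Convenience factory: creates a Node wrapping an FC_Op and attaches two
// Producer nodes holding the weight ("w", input #1) and bias ("b", input #2).
// Minimal usage sketch (the name and dimensions below are illustrative only;
// wiring the node into a graph is done elsewhere):
//
//     // FC layer with 64 output channels, bias enabled, named "fc1"
//     std::shared_ptr<Aidge::Node> fc1 = Aidge::FC(64, false, "fc1");
//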
inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "") {
// FIXME: properly handle default w&b initialization in all cases
auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name);
addProducer(fc, 1, {out_channels, 1}, "w");
addProducer(fc, 2, {(noBias ? 0 : out_channels)}, "b"); // already sets bias dims
return fc;
}
} // namespace Aidge
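// String names for the FCParam values; the order mirrors the enum declaration.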
namespace {
template <>
const char *const EnumStrings<Aidge::FCParam>::data[] = {"OutChannels",
"NoBias"};
}
#endif /* AIDGE_CORE_OPERATOR_FC_H_ */