Skip to content
Snippets Groups Projects
Commit fa33e4c5 authored by Olivier BICHLER's avatar Olivier BICHLER
Browse files

Minor refactor

parent cd66331b
No related branches found
No related tags found
No related merge requests found
......@@ -36,6 +36,7 @@
#include "aidge/operator/MatMul.hpp"
#include "aidge/operator/MaxPooling.hpp"
#include "aidge/operator/MetaOperator.hpp"
#include "aidge/operator/MetaOperatorDefs.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/operator/ReLU.hpp"
......
......@@ -41,68 +41,7 @@ public:
public:
// Wraps a micro-graph into a single operator (pre-refactor inline definition;
// this commit moves the body to MetaOperator.cpp).
// @param type        Operator type string for this meta-operator.
// @param graph       Micro-graph implementing the operator.
// @param inputNodes  Ordered internal nodes receiving the meta-operator inputs;
//                    may be empty only if the graph has exactly one input node.
// @param outputNodes Ordered internal nodes producing the meta-operator outputs;
//                    may be empty only if the graph has exactly one output node.
MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph,
std::vector<NodePtr> inputNodes = std::vector<NodePtr>(),
std::vector<NodePtr> outputNodes = std::vector<NodePtr>())
: Operator(type),
mGraph(graph)
{
// Allocate one (initially empty) tensor per micro-graph input and output.
mInputs = std::vector<std::shared_ptr<Tensor>>(mGraph->inputs().size());
for (std::size_t i = 0; i < mInputs.size(); ++i) {
mInputs[i] = std::make_shared<Tensor>();
}
mOutputs = std::vector<std::shared_ptr<Tensor>>(mGraph->outputs().size());
for (std::size_t i = 0; i < mOutputs.size(); ++i) {
mOutputs[i] = std::make_shared<Tensor>();
}
// Fill inputsNodes and outputsNodes when there is no ambiguity
// (a single graph input/output node needs no explicit mapping).
if (inputNodes.empty()) {
AIDGE_ASSERT(mGraph->inputNodes().size() == 1, "need to specify internal nodes input mapping");
inputNodes.push_back(*mGraph->inputNodes().begin());
}
if (outputNodes.empty()) {
AIDGE_ASSERT(mGraph->outputNodes().size() == 1, "need to specify internal nodes output mapping");
outputNodes.push_back(*mGraph->outputNodes().begin());
}
AIDGE_ASSERT(mGraph->inputNodes().size() == inputNodes.size(), "wrong number of specified input nodes");
AIDGE_ASSERT(mGraph->outputNodes().size() == outputNodes.size(), "wrong number of specified output nodes");
// Identify inputs that are outside the micro-graph: each (operator, index)
// pair becomes one input slot of the meta-operator, in inputNodes order.
for (const auto& inputNode : inputNodes) {
AIDGE_ASSERT(mGraph->inView(inputNode), "input node must be in the graph");
const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> inputNodeinputs =
inputNode->inputs();
int inputIdx = 0; // input idx relative to the current node
for (const auto& in : inputNodeinputs) {
if (in.first == nullptr || !mGraph->inView(in.first)) {
// The input is not connected inside the micro-graph
// (no connection to this input or connection outside the micro-graph)
// => it is therefore an input for the meta-operator
mInputOps.push_back(std::make_pair(inputNode->getOperator(), inputIdx));
}
++inputIdx;
}
}
// The outputs of the output nodes are also the outputs of the meta-operator
for (const auto& outputNode : outputNodes) {
AIDGE_ASSERT(mGraph->inView(outputNode), "output node must be in the graph");
const std::vector<std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>> outputNodeoutputs =
outputNode->outputs();
int outputIdx = 0; // output idx relative to the current node
for (const auto& out : outputNodeoutputs) {
mOutputOps.push_back(std::make_pair(outputNode->getOperator(), outputIdx));
++outputIdx;
}
}
// Sanity check: the mapping must cover every graph input/output exactly once.
AIDGE_INTERNAL_ASSERT(mInputOps.size() == mGraph->inputs().size());
AIDGE_INTERNAL_ASSERT(mOutputOps.size() == mGraph->outputs().size());
}
std::vector<NodePtr> outputNodes = std::vector<NodePtr>());
/**
* @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
......@@ -208,71 +147,12 @@ public:
inline IOIndex_t nbDataInputs() const noexcept override final { return mGraph->dataInputs().size(); }
inline IOIndex_t nbOutputs() const noexcept override final { return mGraph->outputs().size(); }
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override {
    // Without a dedicated implementation, forward the query to the internal
    // operator mapped to this meta-operator input.
    if (!mImpl) {
        const auto& mapped = mInputOps[inputIdx];
        return mapped.first->getNbRequiredData(mapped.second);
    }
    return mImpl->getNbRequiredData(inputIdx);
}
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override {
    // Without a dedicated implementation, forward the query to the internal
    // operator mapped to this meta-operator input.
    if (!mImpl) {
        const auto& mapped = mInputOps[inputIdx];
        return mapped.first->getNbConsumedData(mapped.second);
    }
    return mImpl->getNbConsumedData(inputIdx);
}
NbElts_t getNbProducedData(IOIndex_t outputIdx) const override {
    // Without a dedicated implementation, forward the query to the internal
    // operator mapped to this meta-operator output.
    if (!mImpl) {
        const auto& mapped = mOutputOps[outputIdx];
        return mapped.first->getNbProducedData(mapped.second);
    }
    return mImpl->getNbProducedData(outputIdx);
}
void updateConsummerProducer() override {
    // A custom implementation tracks its own consumption/production.
    if (mImpl) {
        mImpl->updateConsummerProducer();
        return;
    }
    // Otherwise, (re)schedule the micro-graph; the scheduler is created lazily.
    if (!mScheduler) {
        mScheduler = std::make_shared<SequentialScheduler>(mGraph);
    }
    // TODO: check that generateScheduling() can be called multiple time to iteratively update the schedule.
    // It could be a good idea to unify updateConsummerProducer() and generateScheduling() into a "updateScheduling()"
    mScheduler->generateScheduling();
}
void forward() override {
    // A custom implementation exists for this meta operator: use it directly.
    if (mImpl) {
        mImpl->forward();
        return;
    }
    // No custom implementation: execute the micro-graph operator by operator.
    if (!mScheduler) {
        // Lazy initialization
        // TODO: should we assert that a scheduler already exists at this point?
        // => should be created in updateConsummerProducer()
        mScheduler = std::make_shared<SequentialScheduler>(mGraph);
        mScheduler->generateScheduling();
    }
    mScheduler->forward(false);
}
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override;
NbElts_t getNbProducedData(IOIndex_t outputIdx) const override;
void updateConsummerProducer() override;
void forward() override;
// Backward pass is not implemented for meta-operators.
// NOTE(review): assert() compiles out under NDEBUG, so in release builds this
// silently does nothing — consider raising an error instead; verify intent.
void backward() override {
assert(false && "not implemented");
}
......@@ -285,73 +165,6 @@ inline std::shared_ptr<Node> MetaOperator(const char *type,
{
return std::make_shared<Node>(std::make_shared<MetaOperator_Op>(type, graph), name);
}
// TODO: move elsewhere
// Builds a "PaddedConv" meta-operator: an explicit Pad node feeding a Conv
// node, exposed as a single node with weight ("w", input 1) and bias
// ("b", input 2) producers attached.
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
DimSize_t out_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0},
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
{
// Construct micro-graph
auto pad = std::make_shared<Node>(std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(padding_dims, PadBorderType::Constant, 0.0), (!name.empty()) ? name + "_pad" : "");
auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), (!name.empty()) ? name + "_conv" : "");
// Need to specify the ordered list of input operators
// (both pad and conv have inputs coming from outside the micro-graph).
const std::vector<NodePtr> orderedInputNodes = {pad, conv};
auto metaOp = std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({pad, conv}), orderedInputNodes), name);
addProducer(metaOp, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
addProducer(metaOp, 2, {out_channels}, "b");
return metaOp;
}
// Convenience overload accepting a C-array kernel specification
// (e.g. PaddedConv(3, 16, {3, 3})); delegates to the std::array overload.
template <DimSize_t DIM>
inline std::shared_ptr<Node> PaddedConv(
DimSize_t in_channels,
DimSize_t out_channels,
DimSize_t const (&kernel_dims)[DIM],
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0},
const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
{
return PaddedConv<DIM>(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, padding_dims, dilation_dims);
}
// Builds a "PaddedAvgPooling" meta-operator: an explicit Pad followed by an
// average pooling, exposed as a single node.
// NOTE(review): in_channels and out_channels are accepted but never used here —
// confirm whether they are kept only for signature symmetry with PaddedConv.
// NOTE(review): the second Sequential entry constructs AvgPooling_Op (an
// operator) while Pad<DIM> is a node factory — presumably this should be the
// AvgPooling<DIM> node factory; verify.
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedAvgPooling(DimSize_t in_channels,
DimSize_t out_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0})
{
auto graph = Sequential({
Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
AvgPooling_Op<DIM>(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
});
return std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedAvgPooling", graph), name);
}
// Builds a "PaddedMaxPooling" meta-operator: an explicit Pad followed by a
// max pooling, exposed as a single node.
// NOTE(review): in_channels and out_channels are accepted but never used here —
// confirm whether they are kept only for signature symmetry with PaddedConv.
// NOTE(review): the second Sequential entry constructs MaxPooling_Op (an
// operator) while Pad<DIM> is a node factory — presumably this should be the
// MaxPooling<DIM> node factory; verify.
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedMaxPooling(DimSize_t in_channels,
DimSize_t out_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0})
{
auto graph = Sequential({
Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
MaxPooling_Op<DIM>(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims)
});
return std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedMaxPooling", graph), name);
}
} // namespace Aidge
#endif /* MetaOperator_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_
#define AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_
#include "aidge/operator/MetaOperator.hpp"
namespace Aidge {
// Builds a "PaddedConv" meta-operator: an explicit Pad node feeding a Conv
// node, exposed as a single node with weight ("w", input 1) and bias
// ("b", input 2) producers attached.
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
                                        DimSize_t out_channels,
                                        const std::array<DimSize_t, DIM> &kernel_dims,
                                        const std::string& name = "",
                                        const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                        const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0},
                                        const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
{
    // Assemble the micro-graph: Pad -> Conv.
    auto padNode = std::make_shared<Node>(
        std::make_shared<Pad_Op<static_cast<DimIdx_t>(DIM)>>(padding_dims, PadBorderType::Constant, 0.0),
        (!name.empty()) ? name + "_pad" : "");
    auto convNode = std::make_shared<Node>(
        std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims),
        (!name.empty()) ? name + "_conv" : "");
    // Both internal nodes receive inputs from outside the micro-graph, so the
    // ordered input-node mapping must be given explicitly.
    const std::vector<NodePtr> orderedInputNodes = {padNode, convNode};
    auto metaOpNode = std::make_shared<Node>(
        std::make_shared<MetaOperator_Op>("PaddedConv", Sequential({padNode, convNode}), orderedInputNodes),
        name);
    // Attach weight and bias producers to the meta-operator inputs.
    addProducer(metaOpNode, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
    addProducer(metaOpNode, 2, {out_channels}, "b");
    return metaOpNode;
}
// Convenience overload accepting a C-array kernel specification
// (e.g. PaddedConv(3, 16, {3, 3})); delegates to the std::array overload.
template <DimSize_t DIM>
inline std::shared_ptr<Node> PaddedConv(
    DimSize_t in_channels,
    DimSize_t out_channels,
    DimSize_t const (&kernel_dims)[DIM],
    const std::string& name = "",
    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
    const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0},
    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
{
    // Convert the C-array kernel spec, then forward every argument unchanged.
    const auto kernelDimsArray = to_array(kernel_dims);
    return PaddedConv<DIM>(in_channels, out_channels, kernelDimsArray, name, stride_dims, padding_dims, dilation_dims);
}
// Builds a "PaddedAvgPooling" meta-operator: an explicit Pad followed by an
// average pooling, exposed as a single node.
// NOTE(review): in_channels and out_channels are accepted but never used here —
// confirm whether they are kept only for signature symmetry with PaddedConv.
// NOTE(review): the second Sequential entry constructs AvgPooling_Op (an
// operator) while Pad<DIM> is a node factory — presumably this should be the
// AvgPooling<DIM> node factory; verify.
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedAvgPooling(DimSize_t in_channels,
DimSize_t out_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0})
{
auto graph = Sequential({
Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
AvgPooling_Op<DIM>(kernel_dims, (!name.empty()) ? name + "_avgpooling" : "", stride_dims)
});
return std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedAvgPooling", graph), name);
}
// Builds a "PaddedMaxPooling" meta-operator: an explicit Pad followed by a
// max pooling, exposed as a single node.
// NOTE(review): in_channels and out_channels are accepted but never used here —
// confirm whether they are kept only for signature symmetry with PaddedConv.
// NOTE(review): the second Sequential entry constructs MaxPooling_Op (an
// operator) while Pad<DIM> is a node factory — presumably this should be the
// MaxPooling<DIM> node factory; verify.
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> PaddedMaxPooling(DimSize_t in_channels,
DimSize_t out_channels,
const std::array<DimSize_t, DIM> &kernel_dims,
const std::string& name = "",
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
const std::array<std::array<DimSize_t, 2>, DIM> &padding_dims = {0})
{
auto graph = Sequential({
Pad<DIM>(padding_dims, (!name.empty()) ? name + "_pad" : ""),
MaxPooling_Op<DIM>(kernel_dims, (!name.empty()) ? name + "_maxpooling" : "", stride_dims)
});
return std::make_shared<Node>(std::make_shared<MetaOperator_Op>("PaddedMaxPooling", graph), name);
}
} // namespace Aidge
#endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/operator/MetaOperator.hpp"
// Wraps a micro-graph into a single operator.
// @param type        Operator type string for this meta-operator.
// @param graph       Micro-graph implementing the operator.
// @param inputNodes  Ordered internal nodes receiving the meta-operator inputs;
//                    may be empty only if the graph has exactly one input node.
// @param outputNodes Ordered internal nodes producing the meta-operator outputs;
//                    may be empty only if the graph has exactly one output node.
Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<GraphView>& graph,
                                        std::vector<NodePtr> inputNodes,
                                        std::vector<NodePtr> outputNodes)
    : Operator(type),
      mGraph(graph)
{
    // Allocate one (initially empty) tensor per micro-graph input and output.
    mInputs = std::vector<std::shared_ptr<Tensor>>(mGraph->inputs().size());
    for (std::size_t i = 0; i < mInputs.size(); ++i) {
        mInputs[i] = std::make_shared<Tensor>();
    }
    mOutputs = std::vector<std::shared_ptr<Tensor>>(mGraph->outputs().size());
    for (std::size_t i = 0; i < mOutputs.size(); ++i) {
        mOutputs[i] = std::make_shared<Tensor>();
    }

    // Fill inputsNodes and outputsNodes when there is no ambiguity
    // (a single graph input/output node needs no explicit mapping).
    if (inputNodes.empty()) {
        AIDGE_ASSERT(mGraph->inputNodes().size() == 1, "need to specify internal nodes input mapping");
        inputNodes.push_back(*mGraph->inputNodes().begin());
    }
    if (outputNodes.empty()) {
        AIDGE_ASSERT(mGraph->outputNodes().size() == 1, "need to specify internal nodes output mapping");
        outputNodes.push_back(*mGraph->outputNodes().begin());
    }
    AIDGE_ASSERT(mGraph->inputNodes().size() == inputNodes.size(), "wrong number of specified input nodes");
    AIDGE_ASSERT(mGraph->outputNodes().size() == outputNodes.size(), "wrong number of specified output nodes");

    // Identify inputs that are outside the micro-graph: each (operator, index)
    // pair becomes one input slot of the meta-operator, in inputNodes order.
    for (const auto& inputNode : inputNodes) {
        AIDGE_ASSERT(mGraph->inView(inputNode), "input node must be in the graph");
        const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> inputNodeinputs =
            inputNode->inputs();

        // FIX: unsigned index instead of int, consistent with the output loop below.
        std::size_t inputIdx = 0; // input idx relative to the current node
        for (const auto& in : inputNodeinputs) {
            if (in.first == nullptr || !mGraph->inView(in.first)) {
                // The input is not connected inside the micro-graph
                // (no connection to this input or connection outside the micro-graph)
                // => it is therefore an input for the meta-operator
                mInputOps.push_back(std::make_pair(inputNode->getOperator(), inputIdx));
            }
            ++inputIdx;
        }
    }

    // The outputs of the output nodes are also the outputs of the meta-operator
    for (const auto& outputNode : outputNodes) {
        AIDGE_ASSERT(mGraph->inView(outputNode), "output node must be in the graph");
        const std::vector<std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>> outputNodeoutputs =
            outputNode->outputs();

        // FIX: std::size_t index — the original compared a signed int against
        // outputNodeoutputs.size() (signed/unsigned comparison).
        for (std::size_t outputIdx = 0; outputIdx < outputNodeoutputs.size(); ++outputIdx) {
            mOutputOps.push_back(std::make_pair(outputNode->getOperator(), outputIdx));
        }
    }

    // Sanity check: the mapping must cover every graph input/output exactly once.
    AIDGE_INTERNAL_ASSERT(mInputOps.size() == mGraph->inputs().size());
    AIDGE_INTERNAL_ASSERT(mOutputOps.size() == mGraph->outputs().size());
}
Aidge::NbElts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const {
    // Without a dedicated implementation, forward the query to the internal
    // operator mapped to this meta-operator input.
    if (!mImpl) {
        const auto& mapped = mInputOps[inputIdx];
        return mapped.first->getNbRequiredData(mapped.second);
    }
    return mImpl->getNbRequiredData(inputIdx);
}
Aidge::NbElts_t Aidge::MetaOperator_Op::getNbConsumedData(IOIndex_t inputIdx) const {
    // Without a dedicated implementation, forward the query to the internal
    // operator mapped to this meta-operator input.
    if (!mImpl) {
        const auto& mapped = mInputOps[inputIdx];
        return mapped.first->getNbConsumedData(mapped.second);
    }
    return mImpl->getNbConsumedData(inputIdx);
}
Aidge::NbElts_t Aidge::MetaOperator_Op::getNbProducedData(IOIndex_t outputIdx) const {
    // Without a dedicated implementation, forward the query to the internal
    // operator mapped to this meta-operator output.
    if (!mImpl) {
        const auto& mapped = mOutputOps[outputIdx];
        return mapped.first->getNbProducedData(mapped.second);
    }
    return mImpl->getNbProducedData(outputIdx);
}
void Aidge::MetaOperator_Op::updateConsummerProducer() {
    // A custom implementation tracks its own consumption/production.
    if (mImpl) {
        mImpl->updateConsummerProducer();
        return;
    }
    // Otherwise, (re)schedule the micro-graph; the scheduler is created lazily.
    if (!mScheduler) {
        mScheduler = std::make_shared<SequentialScheduler>(mGraph);
    }
    // TODO: check that generateScheduling() can be called multiple time to iteratively update the schedule.
    // It could be a good idea to unify updateConsummerProducer() and generateScheduling() into a "updateScheduling()"
    mScheduler->generateScheduling();
}
void Aidge::MetaOperator_Op::forward() {
    // A custom implementation exists for this meta operator: use it directly.
    if (mImpl) {
        mImpl->forward();
        return;
    }
    // No custom implementation: execute the micro-graph operator by operator.
    if (!mScheduler) {
        // Lazy initialization
        // TODO: should we assert that a scheduler already exists at this point?
        // => should be created in updateConsummerProducer()
        mScheduler = std::make_shared<SequentialScheduler>(mGraph);
        mScheduler->generateScheduling();
    }
    mScheduler->forward(false);
}
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.