diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 72058dfcba6e811a01a22e261208741879638cad..baa7a486ced74375792cf1ebd3b2f7056168f027 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -78,12 +78,6 @@ public:
     void computeOutputDims() override final {
         // Forward dims of micro-graph
        mGraph->forwardDims();
-
-        // Associate outputs to micro-graph outputs for custom implementation
-        for (size_t outputIdx = 0; outputIdx < mOutputOps.size(); ++outputIdx) {
-            const auto& outputOp = mOutputOps[outputIdx];
-            mOutputs[outputIdx] = outputOp.first->getOutput(outputOp.second);
-        }
     }
 
     bool outputDimsForwarded() const override final { return !(mOutputs[0]->empty()); }
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index c1f58c68686d9359fa3b8ea4b5eb54244e988895..23a98152a2b155b5e059c25e616eee47040c0aed 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -22,10 +22,6 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<
     for (std::size_t i = 0; i < mInputs.size(); ++i) {
         mInputs[i] = std::make_shared<Tensor>();
     }
-    mOutputs = std::vector<std::shared_ptr<Tensor>>(mGraph->outputs().size());
-    for (std::size_t i = 0; i < mOutputs.size(); ++i) {
-        mOutputs[i] = std::make_shared<Tensor>();
-    }
 
     // Fill inputsNodes and outputsNodes when there is no ambiguity
     if (inputNodes.empty()) {
@@ -46,7 +42,7 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<
             AIDGE_ASSERT(mGraph->inView(inputNode), "input node must be in the graph");
             const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> inputNodeinputs =
                 inputNode->inputs();
-
+
             int inputIdx = 0;  // input idx relative to the current node
             for (const auto& in : inputNodeinputs) {
                 if (in.first == nullptr || !mGraph->inView(in.first)) {
@@ -71,8 +67,15 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const char *type, const std::shared_ptr<
         }
     }
 
+    AIDGE_INTERNAL_ASSERT(mInputOps.size() == mGraph->inputs().size());
     AIDGE_INTERNAL_ASSERT(mOutputOps.size() == mGraph->outputs().size());
 
+    mOutputs = std::vector<std::shared_ptr<Tensor>>(mGraph->outputs().size());
+    // Associate outputs to micro-graph outputs for custom implementation
+    for (size_t outputIdx = 0; outputIdx < mOutputOps.size(); ++outputIdx) {
+        const auto& outputOp = mOutputOps[outputIdx];
+        mOutputs[outputIdx] = outputOp.first->getOutput(outputOp.second);
+    }
 }
 
 Aidge::NbElts_t Aidge::MetaOperator_Op::getNbRequiredData(const IOIndex_t inputIdx) const {
@@ -114,7 +117,7 @@ void Aidge::MetaOperator_Op::updateConsummerProducer() {
         // Lazy initialization
         mScheduler = std::make_shared<SequentialScheduler>(mGraph);
     }
-
+
     // TODO: check that generateScheduling() can be called multiple time to iteratively update the schedule.
     // It could be a good idea to unify updateConsummerProducer() and generateScheduling() into a "updateScheduling()"
     mScheduler->generateScheduling();
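
For context, the net effect of the patch above is that the meta-operator's output tensors are associated once, in the MetaOperator_Op constructor, with the micro-graph's own output tensors, instead of being allocated as fresh empty tensors and then re-pointed every time computeOutputDims() runs. The self-contained C++ sketch below illustrates that construction-time aliasing pattern under simplified assumptions; Tensor, InnerOp and MetaOp here are hypothetical stand-ins, not the actual Aidge classes.

    #include <cassert>
    #include <memory>
    #include <vector>

    // Simplified, hypothetical stand-ins used only to illustrate the pattern;
    // these are NOT the Aidge classes.
    struct Tensor {
        std::vector<int> dims;
        bool empty() const { return dims.empty(); }
    };

    struct InnerOp {
        std::shared_ptr<Tensor> out = std::make_shared<Tensor>();
        std::shared_ptr<Tensor> getOutput() const { return out; }
        void forwardDims() { out->dims = {2, 3}; }  // pretend dims get computed here
    };

    struct MetaOp {
        InnerOp inner;  // stands in for the micro-graph
        std::vector<std::shared_ptr<Tensor>> mOutputs;

        MetaOp() {
            // Bind the wrapper's outputs to the inner op's output tensors once,
            // at construction time, mirroring what the patch moves into the ctor.
            mOutputs.push_back(inner.getOutput());
        }

        void computeOutputDims() {
            // Only forward dims; no re-association is needed any more.
            inner.forwardDims();
        }

        bool outputDimsForwarded() const { return !mOutputs[0]->empty(); }
    };

    int main() {
        MetaOp op;
        assert(!op.outputDimsForwarded());  // dims not computed yet
        op.computeOutputDims();
        assert(op.outputDimsForwarded());   // wrapper sees the inner dims via the shared tensor
        return 0;
    }

Because mOutputs[0] and the inner output are the same shared_ptr, outputDimsForwarded() observes the forwarded dimensions without any copy or re-binding.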