diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index c063c0b8d966b230f1d707b84239c1acb4be4d77..7059ea7e989d789b4cff0ed895fc2c5ec0ad81bc 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -72,9 +72,6 @@ void Aidge::Producer_Op::forward() {
     if (!backend().empty()) {
         mImpl->forward();
     }
-    // else {
-    //     fmt::print("Basic Producer forward() function.\n");
-    // }
 
     runHooks();
 }
diff --git a/src/scheduler/SequentialScheduler.cpp b/src/scheduler/SequentialScheduler.cpp
index cbd2f173d076689c593e2d1c01723396c5b88bbe..6e3df1bb38e4a4f7650326ce1c36fcdede7cacc9 100644
--- a/src/scheduler/SequentialScheduler.cpp
+++ b/src/scheduler/SequentialScheduler.cpp
@@ -74,21 +74,6 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, const std::vector<std
 }
 
 void Aidge::SequentialScheduler::backward() {
-    // create ad set Grad values
-    // if (instanciateGrad) { compile_gradient(mGraphView); }
-
-    // const auto& ordered_outputs = mGraphView->getOrderedOutputs();
-    // AIDGE_ASSERT(ordered_outputs.size() == data.size(), "You must provide the \
-    //             right number of data objects to run the backward function. \
-    //             {} outputs detected for the current GraphView when {} were \
-    //             provided.", ordered_outputs.size(), data.size());
-    // for (std::size_t i = 0; i < ordered_outputs.size(); ++i) {
-    //     const std::shared_ptr<OperatorTensor> op_ = std::dynamic_pointer_cast<OperatorTensor>(ordered_outputs[i].first->getOperator());
-    //     const std::shared_ptr<Tensor> t_grad = op_->getOutput(ordered_outputs[i].second)->grad();
-    //     AIDGE_ASSERT(data[i]->dims() == t_grad->dims(), "Wrong gradient size (expected {}, got {}).", t_grad->dims(), data[i]->dims());
-    //     *t_grad = data[i]->clone();
-    // }
-
     // Generate scheduling *only if empty*
     // If scheduling was already generated (in one or several steps, i.e. one or
     // several successive call to generateScheduling()), do not generate it twice
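
For context, the commented-out block deleted from SequentialScheduler::backward() sketched how user-provided tensors could seed the gradients of the graph's ordered outputs before running the backward pass. The standalone sketch below is only an illustration of that removed dead code: the free function name seedOutputGradients and the header paths are assumptions, and backward() no longer takes the data/instanciateGrad arguments after this change.

// Sketch only: reconstruction of the gradient-seeding logic described by the
// removed comment block. Helper name and include paths are assumptions.
#include <cstddef>
#include <memory>
#include <vector>

#include "aidge/graph/GraphView.hpp"          // assumed location of GraphView
#include "aidge/operator/OperatorTensor.hpp"  // assumed location of OperatorTensor
#include "aidge/data/Tensor.hpp"              // assumed location of Tensor
#include "aidge/utils/ErrorHandling.hpp"      // assumed location of AIDGE_ASSERT

// Copy one user-provided tensor into the gradient of each ordered graph output.
void seedOutputGradients(const std::shared_ptr<Aidge::GraphView>& graphView,
                         const std::vector<std::shared_ptr<Aidge::Tensor>>& data) {
    const auto& ordered_outputs = graphView->getOrderedOutputs();
    AIDGE_ASSERT(ordered_outputs.size() == data.size(),
                 "You must provide the right number of data objects to run the "
                 "backward function. {} outputs detected for the current GraphView "
                 "when {} were provided.",
                 ordered_outputs.size(), data.size());

    for (std::size_t i = 0; i < ordered_outputs.size(); ++i) {
        // Each ordered output is a (node, output index) pair; fetch its operator.
        const auto op_ = std::dynamic_pointer_cast<Aidge::OperatorTensor>(
            ordered_outputs[i].first->getOperator());
        // Gradient tensor attached to that output.
        const std::shared_ptr<Aidge::Tensor> t_grad =
            op_->getOutput(ordered_outputs[i].second)->grad();
        AIDGE_ASSERT(data[i]->dims() == t_grad->dims(),
                     "Wrong gradient size (expected {}, got {}).",
                     t_grad->dims(), data[i]->dims());
        // Seed the gradient with the user-provided values.
        *t_grad = data[i]->clone();
    }
}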