/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#include <memory>  // std::shared_ptr

#include "aidge/operator/Sigmoid.hpp"
#include "aidge/utils/Types.h"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/backend/cpu/operator/SigmoidImpl.hpp"
#include "aidge/backend/cpu/operator/SigmoidImpl_kernels.hpp"

template <>
void Aidge::SigmoidImpl_cpu::forward() {
    const Sigmoid_Op& op_ = dynamic_cast<const Sigmoid_Op&>(mOp);
    std::shared_ptr<Tensor> in0 = op_.getInput(0);
    AIDGE_ASSERT(in0, "missing input #0 for {} operator", op_.type());

    // Find the kernel best matching the required input/output specs
    const auto impl = Registrar<SigmoidImpl_cpu>::create(getBestMatch(getRequiredSpec()));

    // Run the element-wise forward kernel: out = 1 / (1 + exp(-in))
    impl.forward(in0->size(),
                 getCPUPtr(mOp.getRawInput(0)),
                 getCPUPtr(mOp.getRawOutput(0)));
}

template <>
void Aidge::SigmoidImpl_cpu::backward() {
    const Sigmoid_Op& op_ = dynamic_cast<const Sigmoid_Op&>(mOp);
    std::shared_ptr<Tensor> out0 = op_.getOutput(0);
    AIDGE_ASSERT(out0, "missing output #0 for {} operator", op_.type());
    std::shared_ptr<Tensor> gra_in0 = op_.getInput(0)->grad();
    std::shared_ptr<Tensor> gra_out0 = out0->grad();
    AIDGE_ASSERT(gra_in0 && gra_out0, "missing gradient tensor(s) for {} operator", op_.type());

    // Find the kernel best matching the required input/output specs
    const auto impl = Registrar<SigmoidImpl_cpu>::create(getBestMatch(getRequiredSpec()));

    // Run the backward kernel. The sigmoid derivative is expressed from the
    // forward output alone: d(sigmoid)/dx = out * (1 - out), so the kernel
    // only needs the output, the output gradient, and the input gradient.
    impl.backward(gra_in0->size(),
                  getCPUPtr(out0),
                  getCPUPtr(gra_out0),
                  getCPUPtr(gra_in0));
}
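
/*
 * For reference, a minimal sketch of the kind of scalar kernel the Registrar
 * is expected to resolve to. The names and exact signatures below are
 * illustrative assumptions, not the actual definitions from
 * SigmoidImpl_kernels.hpp; argument order mirrors the impl.forward() and
 * impl.backward() calls above.
 *
 *   #include <cmath>    // std::exp
 *   #include <cstddef>  // std::size_t
 *
 *   template <class I, class O>
 *   void SigmoidImpl_cpu_forward_kernel_sketch(std::size_t size,
 *                                              const void* input_,
 *                                              void* output_) {
 *       const I* input = static_cast<const I*>(input_);
 *       O* output = static_cast<O*>(output_);
 *       for (std::size_t i = 0; i < size; ++i) {
 *           // out = 1 / (1 + exp(-in))
 *           output[i] = static_cast<O>(1) /
 *                       (static_cast<O>(1) + std::exp(-static_cast<O>(input[i])));
 *       }
 *   }
 *
 *   template <class O, class GO, class GI>
 *   void SigmoidImpl_cpu_backward_kernel_sketch(std::size_t size,
 *                                               const void* output_,
 *                                               const void* gradOutput_,
 *                                               void* gradInput_) {
 *       const O* output = static_cast<const O*>(output_);
 *       const GO* gradOutput = static_cast<const GO*>(gradOutput_);
 *       GI* gradInput = static_cast<GI*>(gradInput_);
 *       for (std::size_t i = 0; i < size; ++i) {
 *           // dL/din = dL/dout * out * (1 - out). Depending on the
 *           // framework's gradient convention, this may need to accumulate
 *           // (+=) into gradInput rather than assign.
 *           gradInput[i] = static_cast<GI>(
 *               gradOutput[i] * output[i] * (static_cast<O>(1) - output[i]));
 *       }
 *   }
 */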