/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#include <cassert>
#include <numeric> // std::accumulate
#include <vector>

#include "aidge/utils/Types.h"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"

#include "aidge/backend/cpu/operator/AddImpl.hpp"
#include "aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp"

Aidge::NbElts_t Aidge::AddImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
    // this implementation can be in-place
    return 0;
}

void Aidge::AddImpl_cpu::forward() {
    // All inputs must be connected and share the same data type as the first input.
    assert(mOp.getRawInput(0) && "missing input in Add operator");
    DataType datatypeFirstInput = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType();
    for (IOIndex_t i = 1; i < mOp.nbInputs(); ++i) {
        assert(mOp.getRawInput(i) && "missing input in Add operator");
        assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->dataType() == datatypeFirstInput);
    }

    // Select the kernel registered for the (input, output) data type pair.
    auto kernelFunc = Registrar<AddImplForward_cpu>::create({
        datatypeFirstInput,
        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});

    // Gather raw pointers to every input buffer.
    std::vector<const void*> opInputs;
    for (IOIndex_t i = 0; i < mOp.nbInputs(); ++i) {
        opInputs.push_back(std::static_pointer_cast<Tensor>(mOp.getRawInput(i))->getImpl()->rawPtr());
    }

    // Run the element-wise addition kernel over all inputs into the output buffer.
    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size(),
               opInputs,
               std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->getImpl()->rawPtr());
}