diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index cd36a654772d2d641b9af32bb74b1336f4a9742d..5ff1159e6be4dc3bbd7ea3c893f1ef59eb429ae0 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -80,6 +80,7 @@
 #include "aidge/operator/Split.hpp"
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/operator/Sub.hpp"
+#include "aidge/operator/Sum.hpp"
 #include "aidge/operator/Transpose.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/stimuli/Stimulus.hpp"
diff --git a/include/aidge/operator/Sum.hpp b/include/aidge/operator/Sum.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6718f4179f09c5594c99859bb39e75610de32bba
--- /dev/null
+++ b/include/aidge/operator/Sum.hpp
@@ -0,0 +1,89 @@
+/********************************************************************************
+ * Copyright (c) 2025 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_SUM_H_
+#define AIDGE_CORE_OPERATOR_SUM_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/Registrar.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Description of an element-wise Sum operation on multiple input Tensors,
+ * supporting NumPy broadcasting.
+ *
+ * For each set of elements x0, x1, ..., xN taken from the N input Tensors, the
+ * function is defined as:
+ * `f(x0, ..., xN) = x0 + x1 + ... + xN`
+ *
+ * Broadcasting adjusts the shapes of the input Tensors to make them compatible:
+ * - Tensors are aligned from their rightmost dimensions.
+ * - Dimensions are compatible if they are equal, if one of them is 1, or if one is missing.
+ *
+ * The output Tensor shape is determined by taking the maximum size along
+ * each dimension of the input Tensors after broadcasting.
+ *
+ * @example Input 1: (3, 4, 2), Input 2: (2), Output: (3, 4, 2)
+ * @example Input 1: (1, 5, 3), Input 2: (2, 1, 3), Input 3: (2), Output: (2, 5, 3)
+ *
+ * @see OperatorTensor
+ * @see Registrable
+ */
+class Sum_Op : public OperatorTensor,
+               public Registrable<Sum_Op,
+                                  std::string,
+                                  std::function<std::shared_ptr<OperatorImpl>(const Sum_Op&)>>
+{
+public:
+    static const std::string Type;
+
+    Sum_Op() = delete;
+    Sum_Op(const IOIndex_t nbIn);
+
+    /**
+     * @brief Copy-constructor.
+     * @param op Sum_Op to copy.
+     * @details Copies the operator attributes and its output tensor(s), but not
+     * its input tensors. The new operator has no associated input.
+     */
+    Sum_Op(const Sum_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Sum_Op
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    bool forwardDims(bool allowDataDependency = false) override final;
+
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
+    std::set<std::string> getAvailableBackends() const override;
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input_0", "data_input_n"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> Sum(const IOIndex_t nbIn, const std::string& name = "");
+}  // namespace Aidge
+
+#endif /* AIDGE_CORE_OPERATOR_SUM_H_ */
diff --git a/python_binding/operator/pybind_Sum.cpp b/python_binding/operator/pybind_Sum.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2d09d4736c89c931512b15990e86fe7c83e17619
--- /dev/null
+++ b/python_binding/operator/pybind_Sum.cpp
@@ -0,0 +1,66 @@
+/********************************************************************************
+ * Copyright (c) 2025 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Sum.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void declare_Sum(py::module &m) {
+    py::class_<Sum_Op, std::shared_ptr<Sum_Op>, OperatorTensor>(m, "SumOp", py::multiple_inheritance(),
+        R"mydelimiter(
+        Initialize a Sum operator.
+        This operator performs element-wise addition between multiple input tensors.
+        The operation is defined as:
+            Output = Input1 + Input2 + ... + InputN
+        The output tensor shape is determined by taking the maximum size along each dimension of the input tensors after broadcasting.
+        Examples:
+            Input 1: (3, 4, 2), Input 2: (2), Output: (3, 4, 2)
+            Input 1: (1, 5, 3), Input 2: (2, 1, 3), Input 3: (2), Output: (2, 5, 3)
+        :param nb_inputs : number of inputs to sum.
+        :type nb_inputs : int
+        )mydelimiter")
+    .def(py::init<const IOIndex_t>(), py::arg("nb_inputs"))
+    .def_static("get_inputs_name", &Sum_Op::getInputsName)
+    .def_static("get_outputs_name", &Sum_Op::getOutputsName)
+    .def_readonly_static("Type", &Sum_Op::Type);
+
+    declare_registrable<Sum_Op>(m, "SumOp");
+
+    m.def("Sum", &Sum, py::arg("nb_inputs"), py::arg("name") = "",
+        R"mydelimiter(
+        Initialize a node containing a Sum operator that performs element-wise addition between multiple tensors.
+        The operation is defined as:
+            Output = Input1 + Input2 + ... + InputN
+        The output tensor shape is determined by taking the maximum size along each dimension of the input tensors after broadcasting.
+        Examples:
+            Input 1: (3, 4, 2), Input 2: (2), Output: (3, 4, 2)
+            Input 1: (1, 5, 3), Input 2: (2, 1, 3), Input 3: (2), Output: (2, 5, 3)
+        :param nb_inputs : number of inputs to sum.
+        :type nb_inputs : int
+        :param name : Name of the node (optional).
+        :type name : str
+        :return: A node containing the Sum operator.
+        :rtype: :py:class:`SumOp`
+        )mydelimiter");
+}
+
+void init_Sum(py::module &m) {
+    declare_Sum(m);
+}
+
+}  // namespace Aidge
diff --git a/src/operator/Sum.cpp b/src/operator/Sum.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6c6e5fe2921f9549110909003355c97d420c74cb
--- /dev/null
+++ b/src/operator/Sum.cpp
@@ -0,0 +1,95 @@
+/********************************************************************************
+ * Copyright (c) 2025 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Sum.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+
+const std::string Aidge::Sum_Op::Type = "Sum";
+
+Aidge::Sum_Op::Sum_Op(const IOIndex_t nbIn)
+    : OperatorTensor(Type, std::vector<InputCategory>(nbIn, InputCategory::Data), 1) {
+    if (nbIn == 0) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Sum operator should have at least one input.");
+    }
+}
+
+Aidge::Sum_Op::Sum_Op(const Sum_Op& op)
+    : OperatorTensor(op)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(Sum_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::Sum_Op::clone() const {
+    return std::make_shared<Sum_Op>(*this);
+}
+
+bool Aidge::Sum_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
+        for (std::size_t i = 0; i < nbInputs(); i++) {
+            inputsDims[i] = getInput(i)->dims();
+        }
+
+        std::size_t outNbDims = 1;
+        for (std::size_t i = 0; i < nbInputs(); ++i) {
+            outNbDims = (inputsDims[i].size() > outNbDims) ? inputsDims[i].size() : outNbDims;
+        }
+
+        std::vector<std::size_t> outDims(outNbDims, 1);
+
+        for (auto it = outDims.rbegin(); it != outDims.rend(); ++it) {
+            for (std::size_t i = 0; i < nbInputs(); ++i) {
+                if (!inputsDims[i].empty()) {
+                    const std::size_t dim = inputsDims[i].back();
+                    inputsDims[i].pop_back();
+                    if (*it == 1) {
+                        *it = dim;
+                    }
+                    else if ((dim != *it) && (dim != 1)) {
+                        AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible Tensor shape for Sum Operation: {} for previous inputs vs {} for input#{}",
+                            outDims, getInput(i)->dims(), i);
+                    }
+                }
+            }
+        }
+        mOutputs[0]->resize(outDims);
+        return true;
+    }
+
+    return false;
+}
+
+void Aidge::Sum_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+    SET_IMPL_MACRO(Sum_Op, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+std::set<std::string> Aidge::Sum_Op::getAvailableBackends() const {
+    return Registrar<Sum_Op>::getKeys();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::Sum(const IOIndex_t nbIn, const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<Sum_Op>(nbIn), name);
+}
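
Reviewer note (not part of the patch): a quick way to exercise the new operator without any backend is to run its shape inference directly through the core API. The sketch below is only an illustration; it assumes the existing OperatorTensor/Tensor interface (default Tensor construction plus resize(), associateInput(), forwardDims(), getOutput()) and reuses the broadcast example from the Sum_Op documentation, (1, 5, 3) + (2, 1, 3) + (2) -> (2, 5, 3).

#include <cassert>
#include <memory>
#include <vector>

#include "aidge/data/Tensor.hpp"
#include "aidge/operator/Sum.hpp"

int main() {
    // Sum node with three inputs, built through the factory added in this merge request.
    auto sumNode = Aidge::Sum(3, "sum0");
    auto op = std::static_pointer_cast<Aidge::Sum_Op>(sumNode->getOperator());

    // Dims-only input tensors; no values or backend are needed for shape inference.
    auto in0 = std::make_shared<Aidge::Tensor>();
    auto in1 = std::make_shared<Aidge::Tensor>();
    auto in2 = std::make_shared<Aidge::Tensor>();
    in0->resize({1, 5, 3});
    in1->resize({2, 1, 3});
    in2->resize({2});
    op->associateInput(0, in0);
    op->associateInput(1, in1);
    op->associateInput(2, in2);

    // Runs Sum_Op::forwardDims(), which applies the broadcasting rules described in Sum.hpp.
    assert(op->forwardDims());
    assert((op->getOutput(0)->dims() == std::vector<Aidge::DimSize_t>{2, 5, 3}));

    return 0;
}

Since this merge request only adds the core operator, no kernel is registered for "Sum" yet; actual execution (setBackend() followed by a scheduler forward pass) will require a matching implementation in a backend module such as aidge_backend_cpu.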