/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#ifndef AIDGE_CORE_OPERATOR_ADD_H_
#define AIDGE_CORE_OPERATOR_ADD_H_

#include <array>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstring>
#include <memory>
#include <numeric>
#include <string>
#include <vector>

#include "aidge/utils/Registrar.hpp"
#include "aidge/operator/Operator.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/utils/Types.h"

namespace Aidge {

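// Element-wise addition of NUM input Tensors. All inputs must share the same
// dimensions, and the single output is resized to those dimensions.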
template <std::size_t NUM>
class Add_Op : public Operator,
    public Registrable<Add_Op<NUM>, std::string, std::unique_ptr<OperatorImpl>(const Add_Op<NUM>&)> {
public:
    // FIXME: change accessibility
    std::array<std::shared_ptr<Tensor>, NUM> mInputs;
    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();

public:
    static constexpr const char* Type = "Add";

    constexpr Add_Op()
            : Operator(Type),
            mOutput(std::make_shared<Tensor>())
    {
        static_assert(NUM > 0, "Add should have at least one input");
        for (std::size_t i = 0; i<NUM; ++i) {
            mInputs[i] = std::make_shared<Tensor>();
        }
        setDatatype(DataType::Float32);
    }

    // Data operator[](const char* inputName) override final {
    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
    //         (strcmp(inputName, "weight") ? mInputs[1] :
    //         (strcmp(inputName, "bias") ? mInputs[2] :
    //         nullptr));
    //     assert((in!=nullptr) && "No such parameter");
    //     return *in;
    // }

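    // Registers `data` as the input at index `inputIdx`; the data must be a Tensor.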
    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
        assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");

        mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
    }

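    // Forwards dimensions: once every input Tensor is non-empty and matches the
    // dimensions of the first input, the output is resized to those dimensions.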
    constexpr void computeOutputDims() override final {
        if (!mInputs[0]->empty()) {
            const auto expectedDims = mInputs[0]->dims();
            std::size_t nonEmptyInputTensor = 1;
            for (; nonEmptyInputTensor<NUM && (!mInputs[nonEmptyInputTensor]->empty()); ++nonEmptyInputTensor) {
                assert(expectedDims == mInputs[nonEmptyInputTensor]->dims());
            }
            if (nonEmptyInputTensor == NUM) {
                mOutput->resize(expectedDims);
            }
        }
    }

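    // Returns true once every input and the output hold non-empty dimensions.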
    bool outputDimsForwarded() const override final {
        std::size_t forwarded = 0;
        for (; forwarded < NUM && (!mInputs[forwarded]->empty()); ++forwarded) {}
        return ((forwarded==NUM) && !(mOutput->empty()));
    }

    // void checkDims() const override final {
    //     assert(outputDimsForwarded());
    //     for (const auto& in : mInputs) {
    //         assert(in->dims() == mOutput->dims());
    //     }
    // }
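    // Tensor accessors: by reference, as shared_ptr<Tensor>, and as type-erased Data.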
    inline Tensor& input(const IOIndex_t inputIdx) const override final {
        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
        return *(mInputs[inputIdx].get());
    }
    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }

    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
        return mInputs[inputIdx];
    }
    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "Add operator has only 1 output");
        return mOutput;
    }

    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
        assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
        return std::static_pointer_cast<Data>(mInputs[inputIdx]);
    }
    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
        assert(outputIdx == 0 && "operator supports only 1 output");
        return std::static_pointer_cast<Data>(mOutput);
    }


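    // Instantiates the implementation registered for this backend and propagates
    // the backend choice to the output (and, as a temporary workaround, the inputs).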
    void setBackend(const std::string& name) {
        mImpl = Registrar<Add_Op<NUM>>::create(name)(*this);
        mOutput->setBackend(name);

        // FIXME: temporary workaround
        for (std::size_t i = 0; i < NUM; ++i) {
            mInputs[i]->setBackend(name);
        }
    }

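    // Propagates the datatype to the output (and, as a temporary workaround, the inputs).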
    void setDatatype(const DataType& datatype) {
        mOutput->setDatatype(datatype);

        // FIXME: temporary workaround
        for (std::size_t i = 0; i < NUM; ++i) {
            mInputs[i]->setDatatype(datatype);
        }
    }

    inline IOIndex_t nbInputs() const noexcept override final { return NUM; }
    inline IOIndex_t nbDataInputs() const noexcept override final { return NUM; }
    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
};
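// Helper building a graph Node wrapping an Add_Op with NUM inputs, e.g.:
//   std::shared_ptr<Node> add = Add<2>("add1");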
template <std::size_t NUM>
inline std::shared_ptr<Node> Add(const char* name = nullptr) {
    return std::make_shared<Node>(std::make_shared<Add_Op<NUM>>(), name);
}
} // namespace Aidge

#endif /* AIDGE_CORE_OPERATOR_ADD_H_ */