Skip to content
Snippets Groups Projects
Forked from Eclipse Projects / aidge / aidge_core
2018 commits behind the upstream repository.
Code owners
Assign users and groups as approvers for specific file changes. Learn more.
AvgPooling.hpp 7.61 KiB
/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#ifndef AIDGE_CORE_OPERATOR_AVGPOOLING_H_
#define AIDGE_CORE_OPERATOR_AVGPOOLING_H_

#include <array>
#include <numeric>
#include <vector>
#include <cmath>

#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"

namespace Aidge {
// Static attributes of AvgPooling_Op: window stride and window size, one entry
// per spatial dimension. Order must match the StaticAttributes declaration below
// and the EnumStrings table at the bottom of this file.
enum class AvgPoolingAttr { StrideDims, KernelDims };

/**
 * @brief Average-pooling operator over the DIM trailing (spatial) dimensions
 * of an (N, C, d_0, ..., d_{DIM-1}) input tensor.
 * @tparam DIM Number of spatial dimensions covered by the pooling window.
 */
template <DimIdx_t DIM>
class AvgPooling_Op : public OperatorTensor,
                public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
                public StaticAttributes<AvgPoolingAttr,
                                       std::array<DimSize_t, DIM>,
                                       std::array<DimSize_t, DIM>> {

public:
    static constexpr const char *Type = "AvgPooling";

    AvgPooling_Op() = delete;

    using Attributes_ = StaticAttributes<AvgPoolingAttr,
                                             std::array<DimSize_t, DIM>,
                                             std::array<DimSize_t, DIM>>;
    template <AvgPoolingAttr e>
    using attr = typename Attributes_::template attr<e>;

    /**
     * @brief Build an AvgPooling operator.
     * @param kernel_dims Pooling-window size along each spatial dimension.
     * @param stride_dims Window step along each spatial dimension (defaults to all 1s).
     */
    constexpr AvgPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
        : OperatorTensor(Type, 1, 0, 1),  // 1 data input, 0 parameter inputs, 1 output
          Attributes_(attr<AvgPoolingAttr::StrideDims>(stride_dims),
                      attr<AvgPoolingAttr::KernelDims>(kernel_dims)) {}

    /**
     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
     * @param op Operator to copy.
     */
    AvgPooling_Op(const AvgPooling_Op<DIM>& op)
        : OperatorTensor(op),
          Attributes_(op)
    {
        // Re-instantiate a backend implementation only if the source operator had one.
        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
    }

    /**
     * @brief Clone the operator using its copy-constructor.
     * @see Operator::AvgPooling_Op
     */
    std::shared_ptr<Operator> clone() const override {
        return std::make_shared<AvgPooling_Op<DIM>>(*this);
    }

    /**
     * @brief Compute and set the output tensor dimensions from the input:
     * batch and channel dims are forwarded unchanged; each spatial dim becomes
     * 1 + floor((in - kernel) / stride). No-op if the input tensor is empty.
     * @throws std::runtime_error (via AIDGE_THROW_OR_ABORT) if input 0 is not associated.
     */
    void computeOutputDims() override final {
        // check inputs have been associated
        if (!getInput(0)) {
            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
        }
        if (!(getInput(0)->empty())) {
            std::array<DimSize_t, DIM + 2> outputDims;
            const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
            outputDims[0] = inputDims[0];  // same batch size
            outputDims[1] = inputDims[1];  // same channel count

            for (std::size_t dim = 0; dim < this->template getAttr<AvgPoolingAttr::KernelDims>().size() ; ++dim) {
                // out = 1 + floor((in - kernel) / stride)  -- no padding here
                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                                            std::floor(static_cast<float>(inputDims[dim+2] -
                                                                    this->template getAttr<AvgPoolingAttr::KernelDims>()[dim]) /
                                            static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
            }
            getOutput(0)->resize(outputDims);
        }
    }

    /**
     * @brief Compute the input region (receptive field) needed to produce the
     * output patch of size @p outputDims starting at flat index @p firstIdx.
     * @param firstIdx Flat index of the first element of the output patch.
     * @param outputDims Extent of the output patch, one entry per output dimension (DIM+2 expected).
     * @param outputIdx Output tensor index; must be 0 (single-output operator).
     * @return A single (input flat index, input patch dims) pair for the data input.
     * @throws std::runtime_error (via AIDGE_THROW_OR_ABORT) on out-of-range patch
     *         or if output dims have not been forwarded yet.
     */
    std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>
    computeReceptiveField(const std::size_t firstIdx,
                            const std::vector<DimSize_t>& outputDims,
                            const IOIndex_t outputIdx = 0) const override final
    {
        if (outputIdx != 0) {
            // Fixed: message previously named Conv_Op (copy-paste error).
            AIDGE_THROW_OR_ABORT(std::runtime_error, "AvgPooling_Op Operator has got only one output Tensor.");
        }
        if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
            // Offset
            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
            std::vector<DimSize_t> inputIdxDims = outputIdxDims;

            // Validate that the requested patch lies inside the output tensor.
            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
                }
            }

            // padding is not a parameter of AvgPooling_Op. It is handled in Pad_Op Operator
            // Width
            std::vector<DimSize_t> inputDims;
            inputDims.push_back(outputDims[0]); // same batch value
            inputDims.push_back(outputDims[1]); // same channel value

            for (DimIdx_t i = 0; i < DIM; ++i) {
                // Input extent along spatial dim i: (out - 1) * stride + kernel.
                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
                            * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
                            + 1
                            + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
                // Map the output start coordinate back to the input start coordinate.
                inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
            }
            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
            return res;
        }
        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
    }

    /// @brief Select the backend implementation for this operator and propagate it to the output tensor.
    void setBackend(const std::string &name, int device = 0) override {
        mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
        mOutputs[0]->setBackend(name, device);
    }

    static const std::vector<std::string> getInputsName(){
        return {"data_input"};
    }
    static const std::vector<std::string> getOutputsName(){
        return {"data_output"};
    }
};

/**
 * @brief Factory: wrap an AvgPooling_Op<DIM> in a graph Node.
 * @param kernel_dims Pooling-window size per spatial dimension.
 * @param name Optional node name.
 * @param stride_dims Window step per spatial dimension (defaults to all 1s).
 */
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
                                           const std::string& name = "",
                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
    auto poolingOp = std::make_shared<AvgPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims);
    return std::make_shared<Node>(poolingOp, name);
}

/**
 * @brief Overload taking a C-style array for kernel_dims so that DIM is
 * deduced automatically from the initializer (e.g. AvgPooling({2, 2})).
 */
template <DimSize_t DIM>
inline std::shared_ptr<Node> AvgPooling(
    DimSize_t const (&kernel_dims)[DIM],
    const std::string& name = "",
    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1)) {
    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
    // Convert to std::array and forward to the primary factory above.
    const std::array<DimSize_t, DIM> kernelArray = to_array(kernel_dims);
    return AvgPooling(kernelArray, name, stride_dims);
}
}  // namespace Aidge

namespace {
// Human-readable names for AvgPoolingAttr values; array order must match the
// enum declaration order (StrideDims, KernelDims).
// NOTE(review): an anonymous namespace in a header gives every translation
// unit its own copy of this specialization — presumably the project-wide
// convention for EnumStrings tables; verify against other operator headers.
template <>
const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
                                                          "KernelDims"};
}

#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */