/********************************************************************************
 * Copyright (c) 2023 CEA-List
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#ifndef AIDGE_CORE_OPERATOR_CONV_H_
#define AIDGE_CORE_OPERATOR_CONV_H_

#include <array>
#include <cmath>
#include <numeric>
#include <vector>

#include "aidge/data/Tensor.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/StaticAttributes.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"

namespace Aidge {
enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelDims };
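// Each ConvAttr key indexes the attribute at the same position in the StaticAttributes
// parameter list of Conv_Op below. Illustrative access, assuming a concrete Conv_Op<2>
// instance named `op`:
//   op.getAttr<ConvAttr::KernelDims>();   // std::array<DimSize_t, 2>
//   op.getAttr<ConvAttr::OutChannels>();  // DimSize_t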

template <DimIdx_t DIM>
class Conv_Op : public OperatorTensor,
                public Registrable<Conv_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
                public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
                                       DimSize_t, std::array<DimSize_t, DIM>> {

public:
    static constexpr const char *Type = "Conv";

    Conv_Op() = delete;

    using Attributes_ = StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>,
                                             DimSize_t, DimSize_t, std::array<DimSize_t, DIM>>;
    template <ConvAttr e>
    using attr = typename Attributes_::template attr<e>;

    constexpr Conv_Op(DimSize_t in_channels,
                      DimSize_t out_channels,
                      const std::array<DimSize_t, DIM> &kernel_dims,
                      const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                      const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
        : OperatorTensor(Type, 1, 2, 1),  // 1 data input, 2 parameter inputs (weight, bias), 1 output
          Attributes_(attr<ConvAttr::StrideDims>(stride_dims),
                      attr<ConvAttr::DilationDims>(dilation_dims),
                      attr<ConvAttr::InChannels>(in_channels),
                      attr<ConvAttr::OutChannels>(out_channels),
                      attr<ConvAttr::KernelDims>(kernel_dims)) {
        setDataType(DataType::Float32);
    }

    /**
     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
     * @param op Operator to copy.
     */
    Conv_Op(const Conv_Op<DIM>& op)
        : OperatorTensor(op),
          Attributes_(op)
    {
        // cpy-ctor: OperatorTensor(op) copies the output tensor(s) but leaves the inputs
        // unassociated; Attributes_(op) copies the convolution attributes. Recreate the
        // backend implementation for the copy if the source operator had one.
        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
    }

    /**
     * @brief Clone the operator using its copy-constructor.
     * @see Operator::Conv_Op
     */
    std::shared_ptr<Operator> clone() const override {
        return std::make_shared<Conv_Op<DIM>>(*this);
    }

    // Data operator[](const char* inputName) override final {
    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
    //         (strcmp(inputName, "weight") ? mInputs[1] :
    //         (strcmp(inputName, "bias") ? mInputs[2] :
    //         nullptr));
    //     assert((in!=nullptr) && "No such parameter");
    //     return *in;
    // }

    // std::shared_ptr<Conv_Op> clone() const override final {

    // }

    /**
     * @brief Compute the output dimensions from the data input dimensions and the kernel,
     * stride and dilation attributes. No padding is applied by this operator. Does nothing
     * if the data input is still empty.
     */
    void computeOutputDims() override final {
        if (!mInputs[0]->empty()) {
            std::array<DimSize_t, DIM + 2> outputDims = {};

            for (std::size_t dim = 0; dim < this->template getAttr<ConvAttr::KernelDims>().size() ; ++dim) {
                const DimSize_t kernelExtent = this->template getAttr<ConvAttr::DilationDims>()[dim] *
                                                       (this->template getAttr<ConvAttr::KernelDims>()[dim] - 1) +
                                               1;

                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
                        floor(static_cast<float>(mInputs[0]->dims()[dim+2] - kernelExtent) /
                              static_cast<float>(this->template getAttr<ConvAttr::StrideDims>()[dim])));
            }

            outputDims[1] = this->template getAttr<ConvAttr::OutChannels>();
            outputDims[0] = mInputs[0]->dims()[0];
            mOutputs[0]->resize(outputDims);
        }
    }
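    // Illustrative numeric check of the formula above (no padding, floor division):
    // for an input of dims {1, 3, 32, 32} with kernel_dims {3, 3}, stride_dims {1, 1},
    // dilation_dims {1, 1} and out_channels = 16, the kernel extent is 1 * (3 - 1) + 1 = 3
    // per spatial dimension, each spatial output dimension is 1 + floor((32 - 3) / 1) = 30,
    // and the output is resized to {1, 16, 30, 30}.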

    void setBackend(const std::string &name) override {
        mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
        mOutputs[0]->setBackend(name);

        // FIXME: temporary workaround
        mInputs[1]->setBackend(name);
        mInputs[2]->setBackend(name);
    }

    static const std::vector<std::string> getInputsName(){
        return {"data_input", "weight", "bias"};
    }
    static const std::vector<std::string> getOutputsName(){
        return {"data_output"};
    }
};
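// Backends provide their OperatorImpl through the Registrable mechanism above. Hypothetical
// registration sketch: the backend name and the MyConvImpl2D class are illustrative, and it
// assumes a Registrar constructor taking a key plus a creator matching
// std::unique_ptr<OperatorImpl>(const Conv_Op<2>&):
//   static Registrar<Conv_Op<2>> registrarConv2DMyBackend(
//       "myBackend",
//       [](const Conv_Op<2>& op) -> std::unique_ptr<OperatorImpl> {
//           return std::make_unique<MyConvImpl2D>(op);
//       });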

template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> Conv(DimSize_t in_channels,
                                  DimSize_t out_channels,
                                  const std::array<DimSize_t, DIM> &kernel_dims,
                                  const std::string& name = "",
                                  const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                  const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
    // FIXME: properly handle default w&b initialization in every case
    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
    auto conv = std::make_shared<Node>(std::make_shared<Conv_Op<static_cast<DimIdx_t>(DIM)>>(in_channels, out_channels, kernel_dims, stride_dims, dilation_dims), name);
    // addProducer(conv, 1, append(append(kernel_dims, in_channels), out_channels), "w");
    // weight producer (input #1): dims {out_channels, in_channels, kernel_dims...}
    addProducer(conv, 1, append(out_channels, append(in_channels, kernel_dims)), "w");
    // bias producer (input #2): dims {out_channels}
    addProducer(conv, 2, std::array<DimSize_t, 1>({out_channels}), "b");
    return conv;
}

// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> Conv(
    DimSize_t in_channels,
    DimSize_t out_channels,
    DimSize_t const (&kernel_dims)[DIM],
    const std::string& name = "",
    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
    const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
    return Conv(in_channels, out_channels, to_array(kernel_dims), name, stride_dims, dilation_dims);
}
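// Usage sketch (illustrative; node names, channel counts and kernel sizes are arbitrary):
//   auto conv1 = Conv(3, 16, {3, 3}, "conv1");                   // 3 -> 16 channels, 3x3 kernel
//   auto conv2 = Conv(16, 32, {5, 5}, "conv2", {2, 2}, {1, 1});  // stride 2 on both spatial dims
// Each call also creates the weight ("w") and bias ("b") Producer nodes feeding inputs 1 and 2.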
}  // namespace Aidge

namespace {
template <>
const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
    "StrideDims",
    "DilationDims",
    "InChannels",
    "OutChannels",
    "KernelDims"
};
}

#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */