From 6b2df0668d36e46283888fe17a72506c59eaf641 Mon Sep 17 00:00:00 2001
From: Vincent TEMPLIER <vincent.templier@cea.fr>
Date: Tue, 19 Sep 2023 16:14:55 +0000
Subject: [PATCH] Add MaxPooling operator
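
Add the MaxPooling_Op<DIM> operator class, its C++ factory helpers and
the Python bindings for 1, 2 and 3 spatial dimensions.

A minimal C++ usage sketch (2x2 kernel, default stride of 1 and zero
padding; the node name "pool1" is illustrative):

    auto pool = Aidge::MaxPooling({2, 2}, "pool1");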

---
 include/aidge/aidge.hpp                       |   1 +
 include/aidge/operator/MaxPooling.hpp         | 179 ++++++++++++++++++
 python_binding/operator/pybind_MaxPooling.cpp |  90 +++++++++
 python_binding/pybind_core.cpp                |   2 +
 4 files changed, 272 insertions(+)
 create mode 100644 include/aidge/operator/MaxPooling.hpp
 create mode 100644 python_binding/operator/pybind_MaxPooling.cpp

diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 91386b9e5..13c360796 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -34,6 +34,7 @@
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/Matmul.hpp"
+#include "aidge/operator/MaxPooling.hpp"
 //#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
new file mode 100644
index 000000000..073243e80
--- /dev/null
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -0,0 +1,179 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_MAXPOOLING_H_
+#define AIDGE_CORE_OPERATOR_MAXPOOLING_H_
+
+#include <array>
+#include <numeric>
+#include <vector>
+#include <cmath>
+#include <cstring>  // strcmp
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
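+// MaxPooling parameters. PaddingDims holds 2*DIM values: entries [i] and [i+DIM]
+// are the begin and end paddings added to spatial dimension i.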
+enum class MaxPoolingParam { StrideDims, KernelDims, PaddingDims };
+
+template <DimIdx_t DIM>
+class MaxPooling_Op : public Operator,
+                public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
+                public Parameterizable<MaxPoolingParam,
+                                       std::array<DimSize_t, DIM>,
+                                       std::array<DimSize_t, DIM>,
+                                       std::array<DimSize_t, (DIM<<1) >> {
+private:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char *Type = "MaxPooling";
+
+    MaxPooling_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<MaxPoolingParam,
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, (DIM<<1)> >;
+    template <MaxPoolingParam e>
+    using param = typename Parameterizable_::template param<e>;
+
+    constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
+        : Operator(Type),
+          Parameterizable_(param<MaxPoolingParam::StrideDims>(stride_dims),
+                           param<MaxPoolingParam::KernelDims>(kernel_dims),
+                           param<MaxPoolingParam::PaddingDims>(padding_dims)),
+          mOutput(std::make_shared<Tensor>()) {
+        setDatatype(DataType::Float32);
+    }
+
+    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 1 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    constexpr void computeOutputDims() override final {
+        if (!mInput->empty()) {
+            std::array<DimSize_t, DIM + 2> outputDims = {};
+
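+            // Output size per spatial dimension:
+            //   out = 1 + floor((in - kernel + pad_begin + pad_end) / stride)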
+            for (std::size_t dim = 0; dim < this->template get<MaxPoolingParam::KernelDims>().size() ; ++dim) {
+                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                                            std::floor(static_cast<float>(mInput->dims()[dim+2] -
+                                                                    this->template get<MaxPoolingParam::KernelDims>()[dim] +
+                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim] +
+                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim+DIM]) /
+                                            static_cast<float>(this->template get<MaxPoolingParam::StrideDims>()[dim])));
+            }
+            outputDims[1] = mInput->dims()[1];
+            outputDims[0] = mInput->dims()[0];
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return *(mInput.get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "MaxPooling Operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "MaxPooling Operators has only 1 outputs");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string &name) {
+        mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+
+    void setDatatype(const DataType &datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
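+// Node factory. Stride defaults to 1 and padding to 0 in every spatial dimension.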
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+                                           const std::string& name = "",
+                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                           const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
+    auto maxPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
+    return maxPool;
+}
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> MaxPooling(
+    DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name = "",
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
+    return MaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+}
+}  // namespace Aidge
+
+namespace {
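+// Maps MaxPoolingParam values to their string names.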
+template <>
+const char *const EnumStrings<Aidge::MaxPoolingParam>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
new file mode 100644
index 000000000..9bd951c44
--- /dev/null
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -0,0 +1,90 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include <string>
+#include <vector>
+#include <array>
+
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/data/Tensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
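+// Binds MaxPooling_Op<DIM> as "MaxPoolingOp{DIM}D" and exposes a "MaxPooling{DIM}D" factory taking std::vector arguments.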
+template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, PyAbstractParametrizable>(
+    m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
+    py::multiple_inheritance())
+  .def(py::init<const std::array<DimSize_t, DIM> &,
+                const std::array<DimSize_t, DIM> &,
+                const std::array<DimSize_t, (DIM<<1)> &>(),
+        py::arg("kernel_dims"),
+        py::arg("stride_dims"),
+        py::arg("padding_dims"));
+  
+  m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
+                                                                  const std::string& name,
+                                                                  const std::vector<DimSize_t> &stride_dims,
+                                                                  const std::vector<DimSize_t> &padding_dims) {
+        // Lambda wrapper because pybind11 fails to convert a const array directly,
+        // so we take std::vector arguments and convert them here to const DimSize_t[DIM] arrays.
+        if (kernel_dims.size() != DIM) {
+            throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
+        }
+        if (stride_dims.size() != DIM) {
+            throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
+        }
+        if (padding_dims.size() != (DIM<<1)) {
+            throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) +"]");
+        }
+        DimSize_t tmp_kernel_dims_array[DIM];
+        for (size_t i = 0; i < DIM; ++i) {
+            tmp_kernel_dims_array[i] = kernel_dims[i];
+        }
+        DimSize_t tmp_stride_dims_array[DIM];
+        for (size_t i = 0; i < DIM; ++i) {
+            tmp_stride_dims_array[i] = stride_dims[i];
+        }
+        DimSize_t tmp_padding_dims_array[DIM<<1];
+        for (size_t i = 0; i < (DIM<<1); ++i) {
+            tmp_padding_dims_array[i] = padding_dims[i];
+        }
+        const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
+        const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
+        const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
+        return MaxPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
+    }, py::arg("kernel_dims"),
+       py::arg("name") = "",
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
+  
+}
+
+
+void init_MaxPooling(py::module &m) {
+  declare_MaxPoolingOp<1>(m);
+  declare_MaxPoolingOp<2>(m);
+  declare_MaxPoolingOp<3>(m);
+ 
+  // FIXME:
+  // m.def("MaxPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
+  // (&)[1])>(&MaxPooling));
+}
+} // namespace Aidge
+#endif
\ No newline at end of file
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index b861f881c..78418d51a 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -29,6 +29,7 @@ void init_FC(py::module&);
 void init_GenericOperator(py::module&);
 void init_LeakyReLU(py::module&);
 void init_Matmul(py::module&);
+void init_MaxPooling(py::module&);
 void init_Producer(py::module&);
 void init_ReLU(py::module&);
 void init_Softmax(py::module&);
@@ -75,6 +76,7 @@ void init_Aidge(py::module& m){
     init_GenericOperator(m);
     init_LeakyReLU(m);
     init_Matmul(m);
+    init_MaxPooling(m);
     init_ReLU(m);
     init_Softmax(m);
 
-- 
GitLab