diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 91386b9e5025bf348aafd6815484bee40441ace0..13c360796fb4912ffb6b5ad17d68c7b56b38b943 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -34,6 +34,7 @@
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/Matmul.hpp"
+#include "aidge/operator/MaxPooling.hpp"
 //#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index d07b61c6a3afcaece6367f2a3730d128c8510b4f..c3bc4b112d4726efc2291447569095551b708ef8 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -303,7 +303,7 @@ public:
    * @param inId Input index.
    * @return std::shared_ptr<Node>&
    */
-  inline NodePtr &getParents(const IOIndex_t inId) {
+  inline NodePtr &getParent(const IOIndex_t inId) {
     assert(inId != gk_IODefaultIndex);
     return mParents.at(inId);
   }
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..073243e801c6e1297129424b0c93b1a7c4f112f3
--- /dev/null
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -0,0 +1,174 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_MAXPOOLING_H_
+#define AIDGE_CORE_OPERATOR_MAXPOOLING_H_
+
+#include <array>
+#include <cmath>
+#include <cstring>  // strcmp, used in associateInput below
+#include <numeric>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class MaxPoolingParam { StrideDims, KernelDims, PaddingDims };
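+// Note: PaddingDims holds 2*DIM values: the first DIM entries pad one side of
+// each spatial dimension, the last DIM entries the other (see the use of
+// PaddingDims[dim] and PaddingDims[dim + DIM] in computeOutputDims).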
+
+template <DimIdx_t DIM>
+class MaxPooling_Op : public Operator,
+                public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
+                public Parameterizable<MaxPoolingParam,
+                                       std::array<DimSize_t, DIM>,
+                                       std::array<DimSize_t, DIM>,
+                                       std::array<DimSize_t, (DIM<<1)>> {
+private:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char *Type = "MaxPooling";
+
+    MaxPooling_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<MaxPoolingParam,
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, (DIM<<1)>>;
+    template <MaxPoolingParam e>
+    using param = typename Parameterizable_::template param<e>;
+
+    constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
+        : Operator(Type),
+          Parameterizable_(param<MaxPoolingParam::StrideDims>(stride_dims),
+                           param<MaxPoolingParam::KernelDims>(kernel_dims),
+                           param<MaxPoolingParam::PaddingDims>(padding_dims)) {
+        setDatatype(DataType::Float32);
+    }
+
+    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 1 && "MaxPooling operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    constexpr void computeOutputDims() override final {
+        if (!mInput->empty()) {
+            std::array<DimSize_t, DIM + 2> outputDims = {};
+
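+            // Classic pooling formula, per spatial dimension (floor rounding):
+            //   out = 1 + floor((in - kernel + pad_begin + pad_end) / stride)
+            // e.g. in = 32, kernel = 2, stride = 2, no padding: out = 1 + floor(30 / 2) = 16.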
+            for (std::size_t dim = 0; dim < this->template get<MaxPoolingParam::KernelDims>().size() ; ++dim) {
+                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                                            std::floor(static_cast<float>(mInput->dims()[dim+2] -
+                                                                    this->template get<MaxPoolingParam::KernelDims>()[dim] +
+                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim] +
+                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim+DIM]) /
+                                            static_cast<float>(this->template get<MaxPoolingParam::StrideDims>()[dim])));
+            }
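+            // Batch (dim 0) and channel (dim 1) sizes pass through unchanged:
+            // pooling only reduces the spatial dimensions.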
+            outputDims[1] = mInput->dims()[1];
+            outputDims[0] = mInput->dims()[0];
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return *(mInput.get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "MaxPooling operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "MaxPooling operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string &name) {
+        mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+
+    void setDatatype(const DataType &datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+                                           const std::string& name = "",
+                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                           const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
+    auto maxPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
+    return maxPool;
+}
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> MaxPooling(
+    DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name = "",
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
+    return MaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+}
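+
+// Usage sketch (name and sizes are illustrative): a 2-D max pooling node with
+// a 2x2 kernel and stride 2, DIM being deduced from the braced kernel dims:
+//   auto pool1 = MaxPooling({2, 2}, "pool1", {2, 2});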
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::MaxPoolingParam>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index 62b86982053d82bef6e0fd80e490632b95b968e5..e3666d247324fc419570611f41bbe67c7c68cc4e 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -136,6 +136,16 @@ void init_Node(py::module& m) {
             :rtype: int
             )mydelimiter")
 
+            .def("get_parents", &Node::getParents,
+            R"mydelimiter(
+            Get the parents of the Node: one entry per input, None when unconnected.
+            )mydelimiter")
+
+            .def("get_children", (std::set<std::shared_ptr<Node>> (Node::*)() const) &Node::getChildren,
+            R"mydelimiter(
+            Get the set of child Nodes across all outputs.
+            )mydelimiter")
+
             .def("__call__", &Node::operator(), py::arg("connectors"));
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9bd951c446e080ff27b099527ac9bbc350646140
--- /dev/null
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -0,0 +1,89 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include <string>
+#include <vector>
+#include <array>
+
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/data/Tensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, PyAbstractParametrizable>(
+    m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
+    py::multiple_inheritance())
+  .def(py::init<const std::array<DimSize_t, DIM> &,
+                const std::array<DimSize_t, DIM> &,
+                const std::array<DimSize_t, (DIM<<1)> &>(),
+        py::arg("kernel_dims"),
+        py::arg("stride_dims"),
+        py::arg("padding_dims"));
+  
+  m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
+                                                                  const std::string& name,
+                                                                  const std::vector<DimSize_t> &stride_dims,
+                                                                  const std::vector<DimSize_t> &padding_dims) {
+        // Lambda wrapper: pybind11 cannot convert a Python list to a const
+        // C-style array, so we take std::vector arguments and convert them
+        // here to const DimSize_t[DIM] arrays.
+        if (kernel_dims.size() != DIM) {
+            throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
+        }
+        if (stride_dims.size() != DIM) {
+            throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
+        }
+        if (padding_dims.size() != (DIM<<1)) {
+            throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match 2*DIM [" + std::to_string(DIM<<1) +"]");
+        }
+        DimSize_t tmp_kernel_dims_array[DIM];
+        for (size_t i = 0; i < DIM; ++i) {
+            tmp_kernel_dims_array[i] = kernel_dims[i];
+        }
+        DimSize_t tmp_stride_dims_array[DIM];
+        for (size_t i = 0; i < DIM; ++i) {
+            tmp_stride_dims_array[i] = stride_dims[i];
+        }
+        DimSize_t tmp_padding_dims_array[DIM<<1];
+        for (size_t i = 0; i < (DIM<<1); ++i) {
+            tmp_padding_dims_array[i] = padding_dims[i];
+        }
+        const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
+        const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
+        const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
+        return MaxPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
+    }, py::arg("kernel_dims"),
+       py::arg("name") = "",
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
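+
+  // Python-side sketch (module name is an assumption):
+  //   import aidge_core
+  //   pool = aidge_core.MaxPooling2D([2, 2], name="pool1", stride_dims=[2, 2])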
+  
+}
+
+
+void init_MaxPooling(py::module &m) {
+  declare_MaxPoolingOp<1>(m);
+  declare_MaxPoolingOp<2>(m);
+  declare_MaxPoolingOp<3>(m);
+ 
+  // FIXME:
+  // m.def("MaxPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
+  // (&)[1])>(&MaxPooling));
+}
+} // namespace Aidge
+#endif
\ No newline at end of file
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index b861f881c684a2fbe800ab672299871cfc89d7ac..78418d51a5c410cb56bb8421fd7f3dc6ec6d32db 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -29,6 +29,7 @@ void init_FC(py::module&);
 void init_GenericOperator(py::module&);
 void init_LeakyReLU(py::module&);
 void init_Matmul(py::module&);
+void init_MaxPooling(py::module&);
 void init_Producer(py::module&);
 void init_ReLU(py::module&);
 void init_Softmax(py::module&);
@@ -75,6 +76,7 @@ void init_Aidge(py::module& m){
     init_GenericOperator(m);
     init_LeakyReLU(m);
     init_Matmul(m);
+    init_MaxPooling(m);
     init_ReLU(m);
     init_Softmax(m);
 
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index af1b68fd3dd7334dd3b28d30b8c791ef50c33b26..1aab107724a1c3a2a5975d7997301fae8ae444e4 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -326,7 +326,7 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
   // add learnable parameters to the graph
   if (includeLearnableParam) {
     for (IOIndex_t i = node->nbDataInputs(); i < node->nbInputs(); ++i) {
-      std::shared_ptr<Node> parentNode = node->getParents(static_cast<IOIndex_t>(i));
+      std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
       if (parentNode) {
           parentNode->addView(shared_from_this());
           mNodes.insert(parentNode);
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 8c98afaf7050d82107587b5a8ec1f8849c40f9e8..0155b22d7b1a429ef48eca043550f78d17660029 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -226,7 +226,7 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t
 }
 
 void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOIndex_t inId) {
-    if (getParents(inId) != nullptr) {
+    if (getParent(inId) != nullptr) {
         printf("Warning, you're replacing a Parent.\n");
     }
     assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Input index out of bound.");
diff --git a/src/recipies/FuseMulAdd.cpp b/src/recipies/FuseMulAdd.cpp
index dc565bf0acc7747d79ec12df973a82d86fc79503..561d25776a28f1aad8f8c943711887ec6661a10c 100644
--- a/src/recipies/FuseMulAdd.cpp
+++ b/src/recipies/FuseMulAdd.cpp
@@ -59,12 +59,12 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
 
     // Step 2 : Branch existing producers & create the others
     // link weights & bias
-    if (matmul->getParents(1)==nullptr) {
-        matmul->getParents(0)->addChild(fc, 0, 1);
+    if (matmul->getParent(1)==nullptr) {
+        matmul->getParent(0)->addChild(fc, 0, 1);
     } else {
-        if (matmul->getParents(0)!=nullptr)
-            matmul->getParents(0)->addChild(fc, 0, 0);
-        matmul->getParents(1)->addChild(fc, 0, 1);
+        if (matmul->getParent(0)!=nullptr)
+            matmul->getParent(0)->addChild(fc, 0, 0);
+        matmul->getParent(1)->addChild(fc, 0, 1);
     }
     (producer_add_bias.first)->addChild(fc,0,2);