diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml
index da0d23c9de978ebcdbb370a6f4a92262829e05b9..73b85c8a409e675c849b9ca66557c63b5acf6359 100644
--- a/.gitlab/ci/build.gitlab-ci.yml
+++ b/.gitlab/ci/build.gitlab-ci.yml
@@ -12,6 +12,7 @@ build:ubuntu_cpp:
     - make -j4 all install
 
   artifacts:
+    expire_in: 1 week
     paths:
       - build_cpp/
       - install_cpp/
@@ -29,6 +30,7 @@ build:ubuntu_python:
     - export AIDGE_INSTALL=`pwd`/install
     - python3 -m pip install .
   artifacts:
+    expire_in: 1 week
     paths:
       - venv/
 
@@ -57,6 +59,7 @@ build:windows_cpp:
     - cmake --install . --config Debug
 
   artifacts:
+    expire_in: 1 week
     paths:
       - build_cpp/
       - install_cpp/
diff --git a/.gitlab/ci/coverage.gitlab-ci.yml b/.gitlab/ci/coverage.gitlab-ci.yml
index 027f3078180bb32b36ca4666f171dda90ef7f7be..3c7b7654190e0768adc6a904f1cb548f020b0c92 100644
--- a/.gitlab/ci/coverage.gitlab-ci.yml
+++ b/.gitlab/ci/coverage.gitlab-ci.yml
@@ -24,8 +24,10 @@ coverage:ubuntu_python:
   script:
     - source venv/bin/activate
     - python3 -m pip install numpy coverage
-    - cd aidge_core
-    - python3 -m coverage run --source=. -m unittest discover -s unit_tests/ -v -b
+    - cd ${CI_PROJECT_NAME}
+    # Retrieve the installation path of the module, since it is installed with pip.
+    - export MODULE_LOCATION=`python -c "import ${CI_PROJECT_NAME} as _; print(_.__path__[0])"`
+    - python3 -m coverage run --source=$MODULE_LOCATION -m unittest discover -s unit_tests/ -v -b
     - python3 -m coverage report
     - python3 -m coverage xml
   coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
@@ -33,4 +35,4 @@ coverage:ubuntu_python:
     reports:
       coverage_report:
         coverage_format: cobertura
-        path: aidge_core/coverage.xml
+        path: ${CI_PROJECT_NAME}/coverage.xml
diff --git a/.gitlab/ci/static_analysis.gitlab-ci.yml b/.gitlab/ci/static_analysis.gitlab-ci.yml
index f7c09a33a65801fb25b1f20f76eac5a7a7952917..3955b87d4efdd9b3610b661779ab9709320754f2 100644
--- a/.gitlab/ci/static_analysis.gitlab-ci.yml
+++ b/.gitlab/ci/static_analysis.gitlab-ci.yml
@@ -26,8 +26,8 @@ static_analysis:python:
   script:
     - pip install pylint
     - pip install pylint-gitlab
-    - pylint --rcfile=.pylintrc --exit-zero --output-format=pylint_gitlab.GitlabCodeClimateReporter aidge_core/ > codeclimate.json
-    - pylint --rcfile=.pylintrc --exit-zero --output-format=pylint_gitlab.GitlabPagesHtmlReporter aidge_core/ > pylint.html
+    - pylint --rcfile=.pylintrc --exit-zero --output-format=pylint_gitlab.GitlabCodeClimateReporter ${CI_PROJECT_NAME}/ > codeclimate.json
+    - pylint --rcfile=.pylintrc --exit-zero --output-format=pylint_gitlab.GitlabPagesHtmlReporter ${CI_PROJECT_NAME}/ > pylint.html
     - mkdir -p public/python/$CI_COMMIT_REF_NAME
     - mv pylint.html public/python/$CI_COMMIT_REF_NAME/
   artifacts:
diff --git a/.gitlab/ci/test.gitlab-ci.yml b/.gitlab/ci/test.gitlab-ci.yml
index 924fd995aff34016cd4fa792a550d3d06db0449c..81e6ca9ac5b868287aa0ef27040c0ead785d3639 100644
--- a/.gitlab/ci/test.gitlab-ci.yml
+++ b/.gitlab/ci/test.gitlab-ci.yml
@@ -17,14 +17,14 @@ test:ubuntu_python:
     - docker
   script:
     - source venv/bin/activate
-    - cd aidge_core
+    - cd ${CI_PROJECT_NAME}
     - python3 -m pip install unittest-xml-reporting
     - python3 -m pip list
     # Run on discovery all tests located in core/unit_tests/python
     - python3 -m xmlrunner discover -s unit_tests/ -v -b --output-file xmlrunner-results.xml
   artifacts:
     reports:
-      junit: aidge_core/xmlrunner-results.xml
+      junit: ${CI_PROJECT_NAME}/xmlrunner-results.xml
 
 test:windows_cpp:
   stage: test
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 67ad9304bc3e682a9436fb52306b3ca8120c1c4b..b764086c8e974dc53aadd345cdd287918d599afb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -52,9 +52,9 @@ target_include_directories(${module_name}
 )
 
 # PYTHON BINDING
-generate_python_binding(${project} ${module_name})
-
 if (PYBIND)
+    generate_python_binding(${project} ${module_name})
+
     # Handles Python + pybind11 headers dependencies
     target_link_libraries(${module_name}
         PUBLIC 
@@ -66,22 +66,12 @@ endif()
 
 target_compile_features(${module_name} PRIVATE cxx_std_14)
 
-
-if(WERROR)
-    target_compile_options(${module_name} PRIVATE
+target_compile_options(${module_name} PRIVATE
     $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-    -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Werror>)
-    target_compile_options(${module_name} PRIVATE
+    -Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror>>)
+target_compile_options(${module_name} PRIVATE
     $<$<CXX_COMPILER_ID:MSVC>:
     /W4>)
-else()
-    target_compile_options(${module_name} PRIVATE
-        $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-        -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Wpedantic>)
-        target_compile_options(${module_name} PRIVATE
-        $<$<CXX_COMPILER_ID:MSVC>:
-        /W4>)
-endif()
 
 if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
     append_coverage_compiler_flags()
diff --git a/cmake/PybindModuleCreation.cmake b/cmake/PybindModuleCreation.cmake
index 18f4abc38e2537c3f4d949f08772a57b90758cb0..8030c1a8639e4b7ae0c5fb865e928a4260c6ae7d 100644
--- a/cmake/PybindModuleCreation.cmake
+++ b/cmake/PybindModuleCreation.cmake
@@ -1,23 +1,21 @@
-function(generate_python_binding name target_to_bind) 
-    if (PYBIND)
-        add_definitions(-DPYBIND)
-        Include(FetchContent)
+function(generate_python_binding name target_to_bind)
+    add_definitions(-DPYBIND)
+    Include(FetchContent)
 
-        FetchContent_Declare(
-        PyBind11
-        GIT_REPOSITORY https://github.com/pybind/pybind11.git
-        GIT_TAG        v2.10.4 # or a later release
-        )
+    FetchContent_Declare(
+    PyBind11
+    GIT_REPOSITORY https://github.com/pybind/pybind11.git
+    GIT_TAG        v2.10.4 # or a later release
+    )
 
-        # Use the New FindPython mode, recommanded. Requires CMake 3.15+
-        find_package(Python COMPONENTS Interpreter Development)
-        FetchContent_MakeAvailable(PyBind11)
+    # Use the new FindPython mode, recommended. Requires CMake 3.15+
+    find_package(Python COMPONENTS Interpreter Development)
+    FetchContent_MakeAvailable(PyBind11)
 
-        message(STATUS "Creating binding for module ${name}")
-        file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
+    message(STATUS "Creating binding for module ${name}")
+    file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
 
-        pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO EXTRA recquired for pip install
-        target_include_directories(${name} PUBLIC "python_binding")
-        target_link_libraries(${name} PUBLIC ${target_to_bind})        
-    endif()
+    pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO_EXTRAS required for pip install
+    target_include_directories(${name} PUBLIC "python_binding")
+    target_link_libraries(${name} PUBLIC ${target_to_bind})
 endfunction()
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index ff6601c487ea97294019a12ba899d251b08077e7..13c360796fb4912ffb6b5ad17d68c7b56b38b943 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -34,7 +34,8 @@
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/Matmul.hpp"
-#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+//#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/ReLU.hpp"
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 5aa2829e16f612b0867ab69feccb829ba2095e1b..d10270b62bb75412a6cbd9203b9b7a3fe220e5aa 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -20,7 +20,7 @@ namespace Aidge {
 class OperatorImpl {
 public:
     virtual void forward(){};
-    virtual void backward() {}
+    virtual void backward(){};
 
     /**
      * @brief Minimum amount of data from a specific input required by the
@@ -46,13 +46,19 @@ public:
     virtual NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const = 0;
 
     /**
-     * @brief TOtal amount of produced data ready to be used on a specific output.
+     * @brief Total amount of produced data ready to be used on a specific output.
      *
      * @param outputIdx Index of the output analysed.
      * @return DimSize_t
      */
     virtual NbElts_t getNbProducedData(const IOIndex_t outputIdx) const = 0;
 
+    /**
+     * @brief Update the Consumer/Producer system by simulating the consumption and production of i/o
+     *
+     */
+    virtual void updateConsummerProducer() = 0;
+
     virtual ~OperatorImpl() = default;
 };
 } // namespace Aidge
diff --git a/include/aidge/graph/OpArgs.hpp b/include/aidge/graph/OpArgs.hpp
index 560c3a02c641c29526752dbf352905d0ded32a7e..9d1ba6fd1e1df594634bfd93a24663ff178b7ee6 100644
--- a/include/aidge/graph/OpArgs.hpp
+++ b/include/aidge/graph/OpArgs.hpp
@@ -55,7 +55,7 @@ public:
  * @param inputs List of Node and GraphView to link sequentially.
  * @return std::shared_ptr<GraphView> Pointer to the generated view.
  */
-std::shared_ptr<GraphView> Sequential(std::initializer_list<OpArgs> inputs);
+std::shared_ptr<GraphView> Sequential(std::vector<OpArgs> inputs);
 
 /////////////////////////////
 // Parallel
@@ -65,7 +65,7 @@ std::shared_ptr<GraphView> Sequential(std::initializer_list<OpArgs> inputs);
  * @param inputs List of Node and GraphView to link sequentially.
  * @return std::shared_ptr<GraphView> pointer to the generated view.
  */
-std::shared_ptr<GraphView> Parallel(std::initializer_list<OpArgs> inputs);
+std::shared_ptr<GraphView> Parallel(std::vector<OpArgs> inputs);
 
 /////////////////////////////
 // Residual
@@ -79,8 +79,8 @@ std::shared_ptr<GraphView> Parallel(std::initializer_list<OpArgs> inputs);
  * @param inputs List of Node and GraphView to link sequentially.
  * @return std::shared_ptr<GraphView> pointer to the generated view.
  */
-std::shared_ptr<GraphView> Residual(std::initializer_list<OpArgs> inputs);
+std::shared_ptr<GraphView> Residual(std::vector<OpArgs> inputs);
 
 }
 
-#endif /* AIDGE_CORE_GRAPH_OPARGS_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_GRAPH_OPARGS_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..073243e801c6e1297129424b0c93b1a7c4f112f3
--- /dev/null
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -0,0 +1,174 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_MAXPOOLING_H_
+#define AIDGE_CORE_OPERATOR_MAXPOOLING_H_
+
+#include <array>
+#include <numeric>
+#include <vector>
+#include <cmath>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class MaxPoolingParam { StrideDims, KernelDims, PaddingDims };
+
+template <DimIdx_t DIM>
+class MaxPooling_Op : public Operator,
+                public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
+                public Parameterizable<MaxPoolingParam,
+                                       std::array<DimSize_t, DIM>,
+                                       std::array<DimSize_t, DIM>,
+                                       std::array<DimSize_t, (DIM<<1) >> {
+private:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char *Type = "MaxPooling";
+
+    MaxPooling_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<MaxPoolingParam,
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, (DIM<<1)> >;
+    template <MaxPoolingParam e>
+    using param = typename Parameterizable_::template param<e>;
+
+    constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
+        : Operator(Type),
+          Parameterizable_(param<MaxPoolingParam::StrideDims>(stride_dims),
+                           param<MaxPoolingParam::KernelDims>(kernel_dims),
+                           param<MaxPoolingParam::PaddingDims>(padding_dims)),
+          mOutput(std::make_shared<Tensor>()) {
+        setDatatype(DataType::Float32);
+    }
+
+    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+        assert(inputIdx < 1 && "operators supports only 3 inputs");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    constexpr void computeOutputDims() override final {
+        if (!mInput->empty()) {
+            std::array<DimSize_t, DIM + 2> outputDims = {};
+
+            for (std::size_t dim = 0; dim < this->template get<MaxPoolingParam::KernelDims>().size() ; ++dim) {
+                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                                            std::floor(static_cast<float>(mInput->dims()[dim+2] -
+                                                                    this->template get<MaxPoolingParam::KernelDims>()[dim] +
+                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim] +
+                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim+DIM]) /
+                                            static_cast<float>(this->template get<MaxPoolingParam::StrideDims>()[dim])));
+            }
+            outputDims[1] = mInput->dims()[1];
+            outputDims[0] = mInput->dims()[0];
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return *(mInput.get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "MaxPooling Operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "MaxPooling Operators has only 1 outputs");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string &name) {
+        mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+
+    void setDatatype(const DataType &datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+                                           const std::string& name = "",
+                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                           const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+    // FIXME: properly handle default w&b initialization in every case
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
+    auto avgPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
+    return avgPool;
+}
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> MaxPooling(
+    DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name = "",
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
+    return MaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+}
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::MaxPoolingParam>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 30e1ce2a7f664485077282405ec60ddf49513cb5..36f846ddae329be28b8e51e2bff1580a509562e1 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -78,6 +78,8 @@ public:
      */
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const;
 
+    void updateConsummerProducer();
+
     virtual void forward();
 
     virtual void backward();
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 81b3f31662933fe4f59a17cdb0ee42441fb791bc..9916ee2004bd1aa9f33acf96d95cae4703f692df 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -43,6 +43,8 @@ public:
     };
     ~SequentialScheduler() = default;
 
+    void generateScheduling(bool verbose = false);
+
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
@@ -54,6 +56,15 @@ public:
      */
     void saveSchedulingDiagram(const std::string& fileName) const;
 
+    /**
+     * @brief Return a vector of Node ordered by the order they are called by the scheduler
+     *
+     * @return std::vector<std::shared_ptr<Node>>
+     */
+    std::vector<std::shared_ptr<Node>> getStaticScheduling(){
+        return mStaticSchedule;
+    }
+
 private:
     /**
      * @brief Set of layers receiving an input from currently processing layers
@@ -63,9 +74,27 @@ private:
      */
     std::set<std::shared_ptr<Node>> getConsumers(const std::set<std::shared_ptr<Node>>& producers) const;
 
+    /**
+     * @brief Shared ptr to the scheduled graph view
+     *
+     */
     std::shared_ptr<GraphView> mGraphView;
+    /**
+     * @brief List of SchedulingElement (i.e: Nodes with their computation time)
+     *
+     */
     std::vector<SchedulingElement> mScheduling;
+    /**
+     * @brief List of nodes ordered by the order they are called by the scheduler
+     *
+     */
+    std::vector<std::shared_ptr<Node>> mStaticSchedule;
+    /**
+     * @brief Number of computation nodes (i.e: nb nodes != Producer)
+     *
+     */
+    std::size_t mComputationNumber = 0; // TODO: Check if not inferable from mStaticSchedule
 };
 } // namespace Aidge
 
-#endif /* AIDGE_SCHEDULER_H_ */
\ No newline at end of file
+#endif /* AIDGE_SCHEDULER_H_ */
diff --git a/python_binding/graph/pybind_OpArgs.cpp b/python_binding/graph/pybind_OpArgs.cpp
index 305c0b73101a97c242413ff84a5ae099764e7e77..6ea89f91945ac44f2142c5b9e8440b11ec6a1663 100644
--- a/python_binding/graph/pybind_OpArgs.cpp
+++ b/python_binding/graph/pybind_OpArgs.cpp
@@ -10,19 +10,20 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
-#include <pybind11/stl.h>
-#include <pybind11/complex.h>
-#include <pybind11/functional.h>
-#include <pybind11/chrono.h>
+
 
 
 namespace py = pybind11;
 namespace Aidge {
 void init_OpArgs(py::module& m){
     py::class_<OpArgs, std::shared_ptr<OpArgs>>(m, "OpArgs")
+    .def(py::init<const std::shared_ptr<GraphView>&>(), py::arg("view_"))
+    .def(py::init<const std::shared_ptr<Node>&>(), py::arg("node_"))
     .def("node", &OpArgs::node)
     .def("view", &OpArgs::view)
     ;
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9bd951c446e080ff27b099527ac9bbc350646140
--- /dev/null
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -0,0 +1,89 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include <string>
+#include <vector>
+#include <array>
+
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/data/Tensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, PyAbstractParametrizable>(
+    m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
+    py::multiple_inheritance())
+  .def(py::init<const std::array<DimSize_t, DIM> &,
+                const std::array<DimSize_t, DIM> &,
+                const std::array<DimSize_t, (DIM<<1)> &>(),
+        py::arg("kernel_dims"),
+        py::arg("stride_dims"),
+        py::arg("padding_dims"));
+  
+  m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
+                                                                  const std::string& name,
+                                                                  const std::vector<DimSize_t> &stride_dims,
+                                                                  const std::vector<DimSize_t> &padding_dims) {
+        // Lambda function wrapper because PyBind fails to convert const array.
+        // So we use a vector that we convert in this function to a const DimSize_t [DIM] array.
+        if (kernel_dims.size() != DIM) {
+            throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
+        }
+        if (stride_dims.size() != DIM) {
+            throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
+        }
+        if (padding_dims.size() != (DIM<<1)) {
+            throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) +"]");
+        }
+        DimSize_t tmp_kernel_dims_array[DIM];
+        for (size_t i = 0; i < DIM; ++i) {
+            tmp_kernel_dims_array[i] = kernel_dims[i];
+        }
+        DimSize_t tmp_stride_dims_array[DIM];
+        for (size_t i = 0; i < DIM; ++i) {
+            tmp_stride_dims_array[i] = stride_dims[i];
+        }
+        DimSize_t tmp_padding_dims_array[DIM<<1];
+        for (size_t i = 0; i < (DIM<<1); ++i) {
+            tmp_padding_dims_array[i] = padding_dims[i];
+        }
+        const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
+        const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
+        const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
+        return MaxPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
+    }, py::arg("kernel_dims"),
+       py::arg("name") = "",
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
+  
+}
+
+
+void init_MaxPooling(py::module &m) {
+  declare_MaxPoolingOp<1>(m);
+  declare_MaxPoolingOp<2>(m);
+  declare_MaxPoolingOp<3>(m);
+ 
+  // FIXME:
+  // m.def("MaxPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
+  // (&)[1])>(&MaxPooling));
+}
+} // namespace Aidge
+#endif
\ No newline at end of file
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index b861f881c684a2fbe800ab672299871cfc89d7ac..78418d51a5c410cb56bb8421fd7f3dc6ec6d32db 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -29,6 +29,7 @@ void init_FC(py::module&);
 void init_GenericOperator(py::module&);
 void init_LeakyReLU(py::module&);
 void init_Matmul(py::module&);
+void init_MaxPooling(py::module&);
 void init_Producer(py::module&);
 void init_ReLU(py::module&);
 void init_Softmax(py::module&);
@@ -75,6 +76,7 @@ void init_Aidge(py::module& m){
     init_GenericOperator(m);
     init_LeakyReLU(m);
     init_Matmul(m);
+    init_MaxPooling(m);
     init_ReLU(m);
     init_Softmax(m);
 
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index 2490d5c55a497223b13bceee6772c2dd44e733ef..85479d41f51e74dee4079e78a37e7f3a520639e2 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -10,6 +10,7 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/graph/GraphView.hpp"
 
@@ -20,6 +21,8 @@ void init_Scheduler(py::module& m){
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
     .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false)
     .def("save_scheduling_diagram", &SequentialScheduler::saveSchedulingDiagram, py::arg("file_name"))
+    .def("generate_scheduling", &SequentialScheduler::generateScheduling, py::arg("verbose")=false)
+    .def("get_static_scheduling", &SequentialScheduler::getStaticScheduling)
     ;
 }
 }
diff --git a/setup.ps1 b/setup.ps1
new file mode 100644
index 0000000000000000000000000000000000000000..61324cf4a7d64094f5ead498adf64719c3290f06
--- /dev/null
+++ b/setup.ps1
@@ -0,0 +1,52 @@
+# Helper setup tool to automatically build aidge_core on Windows.
+
+# Requirements
+################################################################################
+# You have either VS BuildTools or VS Community already present on your 
+# system, with the build tools installed.
+# If not, download Visual Studio Community here:
+# https://visualstudio.microsoft.com/fr/vs/community/
+# Make sure to install the "Desktop Development with C++" workload.
+# Run this script in a Powershell console with Administrator rights in order to
+# automatically install the dependencies, or just execute the second part if you
+# already have all the dependencies satisfied.
+
+# Enable or disable automatic installation of requirements
+# Run .\setup.ps1 -install_reqs:$false to disable it
+param ([bool]$install_reqs=$true)
+
+# Default install path is .\install_cpp
+if (-not $env:AIDGE_INSTALL_PATH)
+{
+    $env:AIDGE_INSTALL_PATH = $(Join-Path $pwd install_cpp)
+}
+
+# 1. Setup environment
+################################################################################
+if ($install_reqs)
+{
+    # Install Chocolatey
+    Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+    # Install dependencies
+    choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    choco install git -Y
+    choco install python -Y
+    # Update PATH
+    $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+}
+
+# 2. Compile & install aidge_core
+################################################################################
+mkdir -Force build_cpp
+mkdir -Force $env:AIDGE_INSTALL_PATH
+Set-Location build_cpp
+cmake -DCMAKE_INSTALL_PREFIX:PATH=$env:AIDGE_INSTALL_PATH -DCMAKE_BUILD_TYPE=Debug ..
+if(!$?) { $lastError = $LASTEXITCODE; Set-Location $PSScriptRoot; Exit $lastError }
+cmake --build . -j2
+if(!$?) { $lastError = $LASTEXITCODE; Set-Location $PSScriptRoot; Exit $lastError }
+cmake --install . --config Debug
+if(!$?) { $lastError = $LASTEXITCODE; Set-Location $PSScriptRoot; Exit $lastError }
+# Optional: run the unit tests
+ctest --output-on-failure
+if(!$?) { $lastError = $LASTEXITCODE; Set-Location $PSScriptRoot; Exit $lastError }
+Set-Location $PSScriptRoot
diff --git a/src/graph/OpArgs.cpp b/src/graph/OpArgs.cpp
index f5f33fb049dec440f3bae412348c83e3427f06ce..124878fc45fe632d4a584e76a0eae6e7acfd53b9 100644
--- a/src/graph/OpArgs.cpp
+++ b/src/graph/OpArgs.cpp
@@ -14,13 +14,13 @@
 #include "aidge/graph/OpArgs.hpp"
 
 
-std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::initializer_list<OpArgs> inputs) {
+std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs) {
     std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
     for (const OpArgs& elt : inputs) {
         if(elt.node() != nullptr) {
             // >= to allow incomplete graphViews
             assert(static_cast<std::size_t>(elt.node()->getNbFreeDataInputs()) >= gv->outputNodes().size());
-            /* 
+            /*
             *  /!\ mn.view()->outputNodes() is a set, order of Nodes cannot be guaranted.
             *  Prefer a functional description for detailed inputs
             */
@@ -44,7 +44,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::initializer_list<OpArgs
 }
 
 
-std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::initializer_list<OpArgs> inputs) {
+std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::vector<OpArgs> inputs) {
     std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
     for(const OpArgs& elt : inputs) {
         if (elt.node()!=nullptr)
@@ -56,7 +56,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::initializer_list<OpArgs>
 }
 
 
-std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::initializer_list<OpArgs> inputs) {
+std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::vector<OpArgs> inputs) {
     std::shared_ptr<GraphView> gv = Sequential(inputs);
     assert(gv->outputNodes().size() == 1U && "Zero or more than one output Node for the GraphView, don't know which one to choose from for the residual connection");
     std::shared_ptr<Node> lastNode = *gv->outputNodes().begin();
@@ -70,4 +70,4 @@ std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::initializer_list<OpArgs>
     assert(lastNode->getNbFreeDataInputs()>=1);
     gv->addChild(lastNode, firstNode, 0U, gk_IODefaultIndex);
     return gv;
-}
\ No newline at end of file
+}
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index 99b07235e2917527160f03af997747f02947dcf9..b3896b12143488275b2a064819595c380da62844 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -38,6 +38,9 @@ Aidge::NbElts_t Aidge::Operator::getNbConsumedData(Aidge::IOIndex_t inputIdx) co
 Aidge::NbElts_t Aidge::Operator::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
     return mImpl->getNbProducedData(outputIdx);
 }
+void Aidge::Operator::updateConsummerProducer(){
+    mImpl->updateConsummerProducer();
+}
 
 void Aidge::Operator::forward() { mImpl->forward(); }
 
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index a8069fda9a3a2f4cbb999eeb3974230767069fb8..dc0768d2b6f7a1dd46fc0a8523b950011f7dcf5d 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -33,26 +33,19 @@ void drawProgressBar(double progress, int barWidth, const std::string& additiona
     fflush(stdout);
 }
 
-// TODO: handle multiple inputs/outputs
-void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
-    if (frowardDims) {mGraphView->forwardDims(); }
-
-    mScheduling.clear();
-
+void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
     // setup initial producers list
-    // add each Producer Node.
-    std::set<std::shared_ptr<Node>> computationOver;
-    std::size_t computationNumber = 0;
+    mComputationNumber = 0;
     std::set<std::shared_ptr<Node>> producers;
     for (const std::shared_ptr<Node>& nodePtr : mGraphView->getNodes()) {
         if (nodePtr->type() == "Producer") {
             producers.insert(nodePtr);
         } else {
-            ++computationNumber;
+            ++mComputationNumber;
         }
     }
     // add Data Input
-    // FIXME : shoudl be changed when the real system for providing
+    // FIXME : should be changed when the real system for providing
     // data is implemented
     for (const std::shared_ptr<Node>& nodePtr : mGraphView->inputNodes()) {
         for (const auto& parentPtr : nodePtr->getParents()) {
@@ -112,21 +105,10 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
             }
         }
 
-        // run sequencially every runnable consumers once
-        // TODO: handle memory allocation in scheduler
-        // TODO: optimize memory usage
+        // Push consumers into the list of nodes to run and update the consumer/producer system
         for (const auto& runnable : runnableConsumers) {
-            if (verbose)
-                printf("run: %s\n",
-                       (runnable->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))).c_str());
-            else
-                drawProgressBar(static_cast<float>(computationOver.size()) / static_cast<float>(computationNumber), 50,
-                                (std::string("running ") + runnable->type() + "_" +
-                                 std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))));
-            const auto tStart = std::chrono::high_resolution_clock::now();
-            runnable->forward();
-            const auto tEnd = std::chrono::high_resolution_clock::now();
-            mScheduling.push_back(SchedulingElement(runnable, tStart, tEnd));
+            runnable->getOperator()->updateConsummerProducer();
+            mStaticSchedule.push_back(runnable);
         }
 
         // update producers and consumers list
@@ -164,18 +146,6 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
                 }
             }
 
-            bool computationOverForConsumer = true;
-            for (IOIndex_t parentIDi = 0; parentIDi < consumer->nbInputs(); ++parentIDi) {
-                if (consumer->getOperator()->getNbConsumedData(parentIDi) <
-                    consumer->getOperator()->getNbRequiredData(parentIDi)) {
-                    computationOverForConsumer = false;
-                    break;
-                }
-            }
-            if (computationOverForConsumer) {
-                computationOver.insert(consumer);
-            }
-
             for (IOIndex_t outId = 0; outId < consumer->nbOutputs(); ++outId) {
                 if (consumer->getOperator()->getNbProducedData(outId) > 0) {
                     if (verbose) printf("  also producer\n");
@@ -197,8 +167,52 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
 
         if (verbose) printf("*************\n");
     } while (!consumers.empty());
+
+}
+
+// TODO: handle multiple inputs/outputs
+void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
+    if (forwardDims) {mGraphView->forwardDims(); }
+
+    // Nodes whose computation is complete (used to report progress below)
+    std::set<std::shared_ptr<Node>> computationOver;
+
+    mScheduling.clear();
+
+    this->generateScheduling();
+
+    // TODO: For loop on the list of nodes to run
+    // run sequentially every runnable consumer once
+    // TODO: handle memory allocation in scheduler
+    // TODO: optimize memory usage
+    for (const auto& runnable : mStaticSchedule) {
+        bool computationOverForConsumer = true;
+        for (IOIndex_t parentIDi = 0; parentIDi < runnable->nbInputs(); ++parentIDi) {
+            if (runnable->getOperator()->getNbConsumedData(parentIDi) <
+                runnable->getOperator()->getNbRequiredData(parentIDi)) {
+                computationOverForConsumer = false;
+                break;
+            }
+        }
+        if (computationOverForConsumer) {
+            computationOver.insert(runnable);
+        }
+
+        if (verbose)
+            printf("run: %s\n",
+                    (runnable->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))).c_str());
+        else
+            drawProgressBar(static_cast<float>(computationOver.size()) / static_cast<float>(mComputationNumber), 50,
+                            (std::string("running ") + runnable->type() + "_" +
+                                std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))));
+        const auto tStart = std::chrono::high_resolution_clock::now();
+        runnable->forward();
+        const auto tEnd = std::chrono::high_resolution_clock::now();
+        mScheduling.push_back(SchedulingElement(runnable, tStart, tEnd));
+    }
     if (!verbose) drawProgressBar(1.0, 50, "                                   ");
     printf("\n");
+
 }
 
 void Aidge::SequentialScheduler::saveSchedulingDiagram(const std::string& fileName) const {
@@ -231,4 +245,4 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::SequentialScheduler::getConsumers(
     }
 
     return consumers;
-}
\ No newline at end of file
+}