diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml
index 80b0af0ce6c5a8c078f8a9ef232828f0a2cf917e..73b85c8a409e675c849b9ca66557c63b5acf6359 100644
--- a/.gitlab/ci/build.gitlab-ci.yml
+++ b/.gitlab/ci/build.gitlab-ci.yml
@@ -1,5 +1,6 @@
 build:ubuntu_cpp:
   stage: build
+  needs: []
   tags:
     - docker
 
@@ -11,12 +12,14 @@ build:ubuntu_cpp:
     - make -j4 all install
 
   artifacts:
+    expire_in: 1 week
     paths:
       - build_cpp/
       - install_cpp/
 
 build:ubuntu_python:
   stage: build
+  needs: []
   tags:
     - docker
 
@@ -27,5 +30,36 @@ build:ubuntu_python:
     - export AIDGE_INSTALL=`pwd`/install
     - python3 -m pip install .
   artifacts:
+    expire_in: 1 week
     paths:
-      - venv/
\ No newline at end of file
+      - venv/
+
+build:windows_cpp:
+  stage: build
+  needs: []
+  tags:
+    - windows
+
+  image: buildtools
+  before_script:
+    # Install Chocolatey
+    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+    # Install dependencies
+    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    - choco install git -Y
+    - choco install python -Y
+    # Update PATH
+    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+  script:
+    - mkdir -p build_cpp
+    - mkdir -p install_cpp
+    - cd build_cpp
+    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug ..
+    - cmake --build . -j2
+    - cmake --install . --config Debug
+
+  artifacts:
+    expire_in: 1 week
+    paths:
+      - build_cpp/
+      - install_cpp/
diff --git a/.gitlab/ci/coverage.gitlab-ci.yml b/.gitlab/ci/coverage.gitlab-ci.yml
index 1977b8aa722c3228862a2aacd5b8ef1fe861bc61..3c7b7654190e0768adc6a904f1cb548f020b0c92 100644
--- a/.gitlab/ci/coverage.gitlab-ci.yml
+++ b/.gitlab/ci/coverage.gitlab-ci.yml
@@ -24,8 +24,10 @@ coverage:ubuntu_python:
   script:
     - source venv/bin/activate
     - python3 -m pip install numpy coverage
-    - cd aidge_core
-    - python3 -m coverage run --source=. -m unittest discover -s unit_tests/ -v -b
+    - cd ${CI_PROJECT_NAME}
+    # Retrieve the installation path of the module, since it is installed with pip.
+    - export MODULE_LOCATION=`python3 -c "import ${CI_PROJECT_NAME} as _; print(_.__path__[0])"`
+    - python3 -m coverage run --source=$MODULE_LOCATION -m unittest discover -s unit_tests/ -v -b
     - python3 -m coverage report
     - python3 -m coverage xml
   coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
@@ -33,4 +35,4 @@ coverage:ubuntu_python:
     reports:
       coverage_report:
         coverage_format: cobertura
-        path: coverage.xml
+        path: ${CI_PROJECT_NAME}/coverage.xml
diff --git a/.gitlab/ci/static_analysis.gitlab-ci.yml b/.gitlab/ci/static_analysis.gitlab-ci.yml
index f7c09a33a65801fb25b1f20f76eac5a7a7952917..3955b87d4efdd9b3610b661779ab9709320754f2 100644
--- a/.gitlab/ci/static_analysis.gitlab-ci.yml
+++ b/.gitlab/ci/static_analysis.gitlab-ci.yml
@@ -26,8 +26,8 @@ static_analysis:python:
   script:
     - pip install pylint
     - pip install pylint-gitlab
-    - pylint --rcfile=.pylintrc --exit-zero --output-format=pylint_gitlab.GitlabCodeClimateReporter aidge_core/ > codeclimate.json
-    - pylint --rcfile=.pylintrc --exit-zero --output-format=pylint_gitlab.GitlabPagesHtmlReporter aidge_core/ > pylint.html
+    - pylint --rcfile=.pylintrc --exit-zero --output-format=pylint_gitlab.GitlabCodeClimateReporter ${CI_PROJECT_NAME}/ > codeclimate.json
+    - pylint --rcfile=.pylintrc --exit-zero --output-format=pylint_gitlab.GitlabPagesHtmlReporter ${CI_PROJECT_NAME}/ > pylint.html
     - mkdir -p public/python/$CI_COMMIT_REF_NAME
     - mv pylint.html public/python/$CI_COMMIT_REF_NAME/
   artifacts:
diff --git a/.gitlab/ci/test.gitlab-ci.yml b/.gitlab/ci/test.gitlab-ci.yml
index 31e4d3f71b55f4d14373cc7f90071609ad99487a..81e6ca9ac5b868287aa0ef27040c0ead785d3639 100644
--- a/.gitlab/ci/test.gitlab-ci.yml
+++ b/.gitlab/ci/test.gitlab-ci.yml
@@ -5,7 +5,10 @@ test:ubuntu_cpp:
     - docker
   script:
     - cd build_cpp
-    - ctest --output-on-failure
+    - ctest --output-junit ctest-results.xml --output-on-failure
+  artifacts:
+    reports:
+      junit: build_cpp/ctest-results.xml
 
 test:ubuntu_python:
   stage: test
@@ -14,8 +17,32 @@ test:ubuntu_python:
     - docker
   script:
     - source venv/bin/activate
-    - cd aidge_core
+    - cd ${CI_PROJECT_NAME}
+    - python3 -m pip install unittest-xml-reporting
     - python3 -m pip list
-    # Run on discovery all tests located in core/unit_tests/python and discard the stdout 
-    # only to show the errors/warnings and the results of the tests
-    - python3 -m unittest discover -s unit_tests/ -v -b 1> /dev/null
+    # Discover and run all tests located in core/unit_tests/python
+    - python3 -m xmlrunner discover -s unit_tests/ -v -b --output-file xmlrunner-results.xml
+  artifacts:
+    reports:
+      junit: ${CI_PROJECT_NAME}/xmlrunner-results.xml
+
+test:windows_cpp:
+  stage: test
+  needs: ["build:windows_cpp"]
+  tags:
+    - windows
+  image: buildtools
+  before_script:
+    # Install Chocolatey
+    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+    # Install dependencies
+    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    - choco install python -Y
+    # Update PATH
+    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+  script:
+    - cd build_cpp
+    - ctest --output-junit ctest-results.xml --output-on-failure
+  artifacts:
+    reports:
+      junit: build_cpp/ctest-results.xml
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 67ad9304bc3e682a9436fb52306b3ca8120c1c4b..b764086c8e974dc53aadd345cdd287918d599afb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -52,9 +52,9 @@ target_include_directories(${module_name}
 )
 
 # PYTHON BINDING
-generate_python_binding(${project} ${module_name})
-
 if (PYBIND)
+    generate_python_binding(${project} ${module_name})
+
     # Handles Python + pybind11 headers dependencies
     target_link_libraries(${module_name}
         PUBLIC 
@@ -66,22 +66,12 @@ endif()
 
 target_compile_features(${module_name} PRIVATE cxx_std_14)
 
-
-if(WERROR)
-    target_compile_options(${module_name} PRIVATE
+target_compile_options(${module_name} PRIVATE
     $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-    -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Werror>)
-    target_compile_options(${module_name} PRIVATE
+    -Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror>>)
+target_compile_options(${module_name} PRIVATE
     $<$<CXX_COMPILER_ID:MSVC>:
     /W4>)
-else()
-    target_compile_options(${module_name} PRIVATE
-        $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-        -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Wpedantic>)
-        target_compile_options(${module_name} PRIVATE
-        $<$<CXX_COMPILER_ID:MSVC>:
-        /W4>)
-endif()
 
 if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE)
     append_coverage_compiler_flags()
diff --git a/cmake/PybindModuleCreation.cmake b/cmake/PybindModuleCreation.cmake
index 18f4abc38e2537c3f4d949f08772a57b90758cb0..8030c1a8639e4b7ae0c5fb865e928a4260c6ae7d 100644
--- a/cmake/PybindModuleCreation.cmake
+++ b/cmake/PybindModuleCreation.cmake
@@ -1,23 +1,21 @@
-function(generate_python_binding name target_to_bind) 
-    if (PYBIND)
-        add_definitions(-DPYBIND)
-        Include(FetchContent)
+function(generate_python_binding name target_to_bind)
+    add_definitions(-DPYBIND)
+    Include(FetchContent)
 
-        FetchContent_Declare(
-        PyBind11
-        GIT_REPOSITORY https://github.com/pybind/pybind11.git
-        GIT_TAG        v2.10.4 # or a later release
-        )
+    FetchContent_Declare(
+    PyBind11
+    GIT_REPOSITORY https://github.com/pybind/pybind11.git
+    GIT_TAG        v2.10.4 # or a later release
+    )
 
-        # Use the New FindPython mode, recommanded. Requires CMake 3.15+
-        find_package(Python COMPONENTS Interpreter Development)
-        FetchContent_MakeAvailable(PyBind11)
+    # Use the new FindPython mode (recommended). Requires CMake 3.15+
+    find_package(Python COMPONENTS Interpreter Development)
+    FetchContent_MakeAvailable(PyBind11)
 
-        message(STATUS "Creating binding for module ${name}")
-        file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
+    message(STATUS "Creating binding for module ${name}")
+    file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp")
 
-        pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO EXTRA recquired for pip install
-        target_include_directories(${name} PUBLIC "python_binding")
-        target_link_libraries(${name} PUBLIC ${target_to_bind})        
-    endif()
+    pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO_EXTRAS required for pip install
+    target_include_directories(${name} PUBLIC "python_binding")
+    target_link_libraries(${name} PUBLIC ${target_to_bind})
 endfunction()
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 7f32d695a41d954e9f31c6682e3cc6fc0226aed9..13c360796fb4912ffb6b5ad17d68c7b56b38b943 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_IMPORTS_H__
-#define __AIDGE_IMPORTS_H__
+#ifndef AIDGE_IMPORTS_H_
+#define AIDGE_IMPORTS_H_
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/backend/TensorImpl.hpp"
@@ -34,7 +34,8 @@
 #include "aidge/operator/FC.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/Matmul.hpp"
-#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+//#include "aidge/operator/MetaOperator.hpp"
 #include "aidge/operator/Operator.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/ReLU.hpp"
@@ -48,4 +49,4 @@
 //#include "aidge/utilsParsing/AstNode.hpp"
 //#include "aidge/utilsParsing/ParsingToken.hpp"
 
-#endif /* __AIDGE_IMPORTS_H__ */
+#endif /* AIDGE_IMPORTS_H_ */
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 7e022145d1eeaa8a2bd79afe69ca06ca57a62651..d10270b62bb75412a6cbd9203b9b7a3fe220e5aa 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_OPERATORIMPL_H__
-#define __AIDGE_OPERATORIMPL_H__
+#ifndef AIDGE_OPERATORIMPL_H_
+#define AIDGE_OPERATORIMPL_H_
 
 #include <cstddef>
 #include <vector>
@@ -20,7 +20,7 @@ namespace Aidge {
 class OperatorImpl {
 public:
     virtual void forward(){};
-    virtual void backward() {}
+    virtual void backward(){};
 
     /**
      * @brief Minimum amount of data from a specific input required by the
@@ -46,15 +46,21 @@ public:
     virtual NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const = 0;
 
     /**
-     * @brief TOtal amount of produced data ready to be used on a specific output.
+     * @brief Total amount of produced data ready to be used on a specific output.
      *
      * @param outputIdx Index of the output analysed.
      * @return DimSize_t
      */
     virtual NbElts_t getNbProducedData(const IOIndex_t outputIdx) const = 0;
 
+    /**
+     * @brief Update the Consumer-Producer system by simulating the consumption and production of I/O.
+     *
+     */
+    virtual void updateConsummerProducer() = 0;
+
     virtual ~OperatorImpl() = default;
 };
 } // namespace Aidge
 
-#endif /* __AIDGE_OPERATORIMPL_H__ */
+#endif /* AIDGE_OPERATORIMPL_H_ */
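Note: `updateConsummerProducer()` is added as a pure virtual, so every backend's `OperatorImpl` subclass must now provide it. A minimal sketch of what that could look like, using only the members visible in this hunk (the class name and counters below are illustrative, not part of Aidge; any further pure virtuals declared in the full header would also need overriding):

```cpp
#include "aidge/backend/OperatorImpl.hpp"

namespace Aidge {
// Hypothetical backend implementation after this change.
class DummyOperatorImpl : public OperatorImpl {
public:
    void forward() override {}
    void backward() override {}

    NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override { return mConsumed; }
    NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override { return mProduced; }

    // New pure virtual introduced by this diff: simulate the
    // consumption/production bookkeeping used by the scheduler.
    void updateConsummerProducer() override { ++mConsumed; ++mProduced; }

private:
    NbElts_t mConsumed = 0;
    NbElts_t mProduced = 0;
};
} // namespace Aidge
```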
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 58f2d547e513d540a491155045c463f9a7199578..c56f66fc0b827ccccd9749b9880507dbf48c8179 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_TENSORIMPL_H__
-#define __AIDGE_TENSORIMPL_H__
+#ifndef AIDGE_TENSORIMPL_H_
+#define AIDGE_TENSORIMPL_H_
 
 #include <cstddef>
 #include <cstdio>
@@ -26,7 +26,7 @@ public:
     virtual void setRawPtr(void* /*ptr*/)
     {
         printf("Cannot set raw pointer for backend %s\n", mBackend);
-    };  
+    };
     virtual std::size_t scalarSize() const = 0; // Size of one scalar (in bytes)
     constexpr const char *backend() const { return mBackend; }
     virtual ~TensorImpl() = default;
@@ -38,4 +38,4 @@ private:
 
 } // namespace Aidge
 
-#endif /* __AIDGE_TENSORIMPL_H__ */
+#endif /* AIDGE_TENSORIMPL_H_ */
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 4edc4b9a5a9fd877cf9a3e84c7f644be2a11534a..81b7810a8a548df7e5a2829b1a31cbe337491382 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_DATA_H__
-#define __AIDGE_DATA_H__
+#ifndef AIDGE_DATA_H_
+#define AIDGE_DATA_H_
 
 #include "aidge/utils/Parameter.hpp"
 
@@ -66,10 +66,10 @@ template <> const Aidge::DataType NativeType<int>::type = Aidge::DataType::Int32
 
 template <>
 const char* const EnumStrings<Aidge::DataType>::data[]
-    = {"Float64", "Float32", "Float16", "BFloat16", "Binary", "Ternary", 
-       "Int2", "Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16", 
-       "Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6", 
+    = {"Float64", "Float32", "Float16", "BFloat16", "Binary", "Ternary",
+       "Int2", "Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16",
+       "Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6",
        "UInt7", "UInt8", "UInt16", "UInt32", "UInt64"};
 }
 
-#endif /* __AIDGE_DATA_H__ */
\ No newline at end of file
+#endif /* AIDGE_DATA_H_ */
\ No newline at end of file
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 01e2a5a51d86c28d3a89bd9085c60bfad297623f..c3a6e478f8943253a9f9b3565db2d4452a9ca133 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_DATA_TENSOR_H__
-#define __AIDGE_CORE_DATA_TENSOR_H__
+#ifndef AIDGE_CORE_DATA_TENSOR_H_
+#define AIDGE_CORE_DATA_TENSOR_H_
 
 #include <cstring>
 #include <set>
@@ -156,10 +156,10 @@ class Tensor : public Data,
      * @param dataType Sets the type of inserted data.
      */
     Tensor(DataType dataType = DataType::Float32)
-        : Data(Type), 
-          mDataType(dataType), 
-          mDims({}), 
-          mSize(0), 
+        : Data(Type),
+          mDataType(dataType),
+          mDims({}),
+          mSize(0),
           mSizeM1(0)
     {
         // ctor
@@ -167,14 +167,14 @@ class Tensor : public Data,
 
     /**
      * @brief Construct a new Tensor object copied from another one.
-     * @param otherTensor 
+     * @param otherTensor
      */
     Tensor(const Tensor& otherTensor)
-        : Data(Type), 
-          mDataType(otherTensor.mDataType), 
-          mDims(otherTensor.mDims), 
-          mSize(otherTensor.mSize), 
-          mSizeM1(otherTensor.mSizeM1) 
+        : Data(Type),
+          mDataType(otherTensor.mDataType),
+          mDims(otherTensor.mDims),
+          mSize(otherTensor.mSize),
+          mSizeM1(otherTensor.mSizeM1)
     {
         if (otherTensor.hasImpl()) {
             mImpl = Registrar<Tensor>::create({otherTensor.mImpl->backend(), dataType()})(*this);
@@ -312,7 +312,7 @@ class Tensor : public Data,
 
     /**
      * @brief Assess data type, dimensions, backend and data are the same.
-     * @param otherTensor 
+     * @param otherTensor
      */
     bool operator==(const Tensor &otherTensor) const {
         if ((!mImpl && !otherTensor.mImpl) || (dataType() != otherTensor.dataType()) ||
@@ -325,7 +325,7 @@ class Tensor : public Data,
     /**
      * @brief Set the backend of the Tensor associated implementation
      * @details Create and initialized an implementation if non was associated.
-     * @param name 
+     * @param name
      */
     inline void setBackend(const std::string &name) {
         if (mImpl) {
@@ -342,7 +342,7 @@ class Tensor : public Data,
 
     /**
      * @brief Get a list of available backends.
-     * @return std::set<std::string> 
+     * @return std::set<std::string>
      */
     static std::set<std::string> getAvailableBackends(){
         std::set<std::string> backendsList;
@@ -353,7 +353,7 @@ class Tensor : public Data,
 
     /**
      * @brief Get the data type enum.
-     * @return constexpr DataType 
+     * @return constexpr DataType
      */
     constexpr DataType dataType() const { return mDataType; }
 
@@ -376,27 +376,27 @@ class Tensor : public Data,
 
     /**
      * @brief Get the Impl object
-     * @return constexpr const std::unique_ptr<TensorImpl>& 
+     * @return constexpr const std::unique_ptr<TensorImpl>&
      */
     constexpr const std::unique_ptr<TensorImpl> &getImpl() { return mImpl; }
 
     /**
      * @brief Return if an implementaiton has been associated.
-     * @return true 
-     * @return false 
+     * @return true
+     * @return false
      */
     bool hasImpl() const { return (mImpl) ? true : false; }
 
     /**
      * @brief Get number of dimensions of the Tensor.
-     * @return std::size_t 
+     * @return std::size_t
      */
     inline std::size_t nbDims() const { return mDims.size(); }
 
     /**
      * @brief Get dimensions of the Tensor object.
      * @tparam DIM number of dimensions.
-     * @return constexpr std::array<DimSize_t, DIM> 
+     * @return constexpr std::array<DimSize_t, DIM>
      */
     template <DimIdx_t DIM>
     constexpr std::array<DimSize_t, DIM> dims() const {
@@ -406,26 +406,26 @@ class Tensor : public Data,
 
     /**
      * @brief Get dimensions of the Tensor object.
-     * @return constexpr const std::vector<DimSize_t>& 
+     * @return constexpr const std::vector<DimSize_t>&
      */
     constexpr const std::vector<DimSize_t> &dims() const { return mDims; }
 
     /**
      * @brief Get the number of elements in the Tensor object.
-     * @return constexpr std::size_t 
+     * @return constexpr std::size_t
      */
     constexpr std::size_t size() const { return mSize; }
 
     /**
      * @brief Get the number of elements in the N-1 dimensions of the Tensor object.
-     * @return constexpr std::size_t 
+     * @return constexpr std::size_t
      */
     constexpr std::size_t sizeM1() const { return mSizeM1; }
 
     /**
      * @brief Change the shape of the Tensor object according to the given argument.
      * @tparam DIM new dimensions.
-     * @param dims 
+     * @param dims
      */
     template <std::array<DimSize_t, 1>::size_type DIM> // deducing std::array size_type and declaring DIM accordingly
     void resize(const std::array<DimSize_t, DIM> &dims) {
@@ -441,8 +441,8 @@ class Tensor : public Data,
 
     /**
      * @brief Return if the Tensor object has at leastone element.
-     * @return true 
-     * @return false 
+     * @return true
+     * @return false
      */
     bool empty() const { return mDims.empty(); }
 
@@ -540,8 +540,8 @@ class Tensor : public Data,
                 }
             }
         }
-        
-        
+
+
         res += "}";
         return res;
     }
@@ -575,10 +575,10 @@ private:
             mSizeM1 = std::accumulate(++mDims.begin(),mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
             mSize = static_cast<std::size_t>(mSizeM1 * mDims[0]);
         }
-        
+
         return mSize;
     }
 };
 }  // namespace Aidge
 
-#endif /* __AIDGE_CORE_DATA_TENSOR_H__ */
+#endif /* AIDGE_CORE_DATA_TENSOR_H_ */
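The Tensor changes above are whitespace cleanup plus the guard rename; for reference, a minimal usage sketch of the API visible in this header (whether `setBackend` succeeds depends on which backend modules are linked and registered, hence the commented-out call):

```cpp
#include "aidge/data/Tensor.hpp"
#include <array>
#include <cstdio>

int main() {
    Aidge::Tensor t(Aidge::DataType::Float32);        // default-constructed, empty
    t.resize(std::array<Aidge::DimSize_t, 2>{4, 3});  // now a 4x3 tensor

    std::printf("nbDims=%zu size=%zu empty=%d\n",
                t.nbDims(), t.size(), t.empty() ? 1 : 0);

    // t.setBackend("cpu"); // only valid if a backend registered under "cpu"
    return 0;
}
```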
diff --git a/include/aidge/graph/Connector.hpp b/include/aidge/graph/Connector.hpp
index c5dde5c97c61d3661c1ee9cebe7cc17080950eb9..599ca7d6defd729b6e6536dcc95f326d345701d9 100644
--- a/include/aidge/graph/Connector.hpp
+++ b/include/aidge/graph/Connector.hpp
@@ -8,8 +8,8 @@
  * SPDX-License-Identifier: EPL-2.0
  *
  ********************************************************************************/
-#ifndef __AIDGE_CORE_GRAPH_CONNECTOR_H__
-#define __AIDGE_CORE_GRAPH_CONNECTOR_H__
+#ifndef AIDGE_CORE_GRAPH_CONNECTOR_H_
+#define AIDGE_CORE_GRAPH_CONNECTOR_H_
 
 #include <cassert>
 #include <memory>
@@ -18,7 +18,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-    
+
 class Node;
 class GraphView;
 /**
@@ -83,4 +83,4 @@ class Connector {
 std::shared_ptr<GraphView> generateGraph(std::vector<Connector> ctors);
 }  // namespace Aidge
 
-#endif /* __AIDGE_CORE_GRAPH_CONNECTOR_H__ */
\ No newline at end of file
+#endif /* AIDGE_CORE_GRAPH_CONNECTOR_H_ */
\ No newline at end of file
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index e5fa35354968963859d0b4cbbc01139cbc309250..f11136adaaa3d23fa9d3dc5749dd5d6771cbc42c 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -10,8 +10,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_GRAPH_GRAPHVIEW_H__
-#define __AIDGE_CORE_GRAPH_GRAPHVIEW_H__
+#ifndef AIDGE_CORE_GRAPH_GRAPHVIEW_H_
+#define AIDGE_CORE_GRAPH_GRAPHVIEW_H_
 
 #include <map>
 #include <memory>
@@ -33,14 +33,14 @@ enum class DataType;
 class GraphView : public std::enable_shared_from_this<GraphView> {
 private:
     /// @brief Name of the graphview
-    std::string mName; 
+    std::string mName;
 
     /// @brief Set of nodes included in the GraphView
-    std::set<NodePtr> mNodes; 
+    std::set<NodePtr> mNodes;
 
     /// @brief Set of nodes included in the graphview with names
     std::map<std::string, NodePtr> mNodeRegistry;
-    
+
     /// @brief Nodes without input link
     std::set<NodePtr> mInputNodes;
 
@@ -49,23 +49,23 @@ private:
 
 public:
     GraphView(std::string name="")
-        : mName(name) 
+        : mName(name)
     {
         // ctor
     }
 
     // GraphView(std::set<NodePtr> nodes, std::string name="")
-    //     : mName(name) 
+    //     : mName(name)
     // {
     //     add(nodes);
     // }
 
-    bool operator==(const GraphView &gv) const 
+    bool operator==(const GraphView &gv) const
     {
         return mNodes == gv.mNodes;
     }
 
-    NodePtr operator[](std::string name) 
+    NodePtr operator[](std::string name)
     {
         assert(mNodeRegistry.find(name) != mNodeRegistry.end() && "Could not find Node in the GraphView.");
         return mNodeRegistry.at(name);
@@ -185,7 +185,7 @@ public:
     /**
      * @brief Get parents Nodes of the specified Node.
      * @param nodeName Name of the Node.
-     * @return std::vector<NodePtr> 
+     * @return std::vector<NodePtr>
      */
     std::vector<NodePtr> getParents(const std::string nodeName) const;
     std::vector<std::vector<NodePtr>> getOrderedParents() const;
@@ -206,9 +206,9 @@ public:
 
     /**
      * @brief Get the Nodes pointed to by the GraphView object.
-     * @return std::set<NodePtr> 
+     * @return std::set<NodePtr>
      */
-    inline std::set<NodePtr> getNodes() const { return mNodes; }
+    inline const std::set<NodePtr>& getNodes() const { return mNodes; }
 
     /**
      * @brief Get the operator with the corresponding name if it is in the
@@ -217,7 +217,7 @@ public:
      * @return NodePtr returns a new empty node if the one asked for
      * was not found.
      */
-    NodePtr getNode(const char *nodeName) const;
+    NodePtr getNode(const std::string& nodeName) const;
 
     /**
      * @brief Remove a Node from the current GraphView scope without affecting its connections.
@@ -233,14 +233,14 @@ public:
     /**
      * @brief Include a Node to the current GraphView object.
      * @param other_Nde Node to add.
-     * @param includeLearnableParam Include non-data inputs, like weights and biases 
+     * @param includeLearnableParam Include non-data inputs, like weights and biases
      * in the GraphView automatically. Default: true.
      */
     void add(NodePtr otherNode, bool includeLearnableParam = true);
     /**
      * @brief Include a set of Nodes to the current GraphView object.
-     * @param otherNodes 
-     * @param includeLearnableParam 
+     * @param otherNodes
+     * @param includeLearnableParam
      */
     void add(std::set<NodePtr> otherNodes,
              bool includeLearnableParam = true);
@@ -326,8 +326,8 @@ public:
     /**
      * @brief Replace the current GraphView with the set of given Nodes if possible
      * @param newNodes Set of Nodes.
-     * @return true 
-     * @return false 
+     * @return true
+     * @return false
      */
     bool replaceWith(std::set<NodePtr> newNodes);
     void updateInputNodes();
@@ -343,13 +343,13 @@ private:
 
     /**
      * @brief Get the sum of the number of dataInput Nodes for all inputNodes of the GraphView object.
-     * @return IOIndex_t 
+     * @return IOIndex_t
      */
     IOIndex_t getNbDataInputs() const;
 
     /**
      * @brief Get the sum of the number of free dataInput connection for all inputNodes of the GraphView object.
-     * @return IOIndex_t 
+     * @return IOIndex_t
      */
     IOIndex_t getNbFreeDataInputs() const;
 
@@ -378,4 +378,4 @@ private:
 };
 }  // namespace Aidge
 
-#endif /* __AIDGE_CORE_GRAPH_GRAPHVIEW_H__ */
\ No newline at end of file
+#endif /* AIDGE_CORE_GRAPH_GRAPHVIEW_H_ */
\ No newline at end of file
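Two API changes in this header deserve a usage note: `getNodes()` now returns a `const` reference instead of copying the whole `std::set` on every call, and `getNode()` takes `std::string` instead of `const char*`. A sketch, with a hypothetical node name:

```cpp
#include "aidge/graph/GraphView.hpp"

void inspect(const std::shared_ptr<Aidge::GraphView>& gv) {
    // No set copy: getNodes() now hands back a const reference.
    for (const Aidge::NodePtr& node : gv->getNodes()) {
        (void) node; // inspect each node here
    }
    // std::string overload; "conv1" is an illustrative name.
    Aidge::NodePtr n = gv->getNode("conv1");
    (void) n;
}
```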
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 0780ce9a24da0ceb0c42b32944021f5df2fa9726..340a8318cbd0d59b7710bce7d46b7acd1670dd5b 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_GRAPH_NODE_H__
-#define __AIDGE_CORE_GRAPH_NODE_H__
+#ifndef AIDGE_CORE_GRAPH_NODE_H_
+#define AIDGE_CORE_GRAPH_NODE_H_
 
 #include <cassert>
 #include <memory>
@@ -39,7 +39,7 @@ private:
           // Compare the content of the weak_ptrs
           auto sharedA = a.lock();
           auto sharedB = b.lock();
-          if (!sharedB) return false; // nothing after expired pointer 
+          if (!sharedB) return false; // nothing after expired pointer
           if (!sharedA) return true;
           return sharedA < sharedB; // shared_ptr has a valid comparison operator
       }
@@ -62,7 +62,7 @@ public:
    * @param op Operator giving the Node its number of connections.
    * @param name (optional) name for the Node.
    */
-  Node(std::shared_ptr<Operator> op, const char *name = nullptr);
+  Node(std::shared_ptr<Operator> op, const std::string& name = "");
 
   virtual ~Node() = default;
 
@@ -78,7 +78,7 @@ public:
   /**
    * @brief Functional operator for user-friendly connection interface using an ordered set of Connectors.
    * @param ctors Ordered Connectors linking their associated Node to the input of the current Node with the same index.
-   * @return Connector 
+   * @return Connector
    */
   Connector operator()(const std::vector<Connector> &ctors);
 
@@ -165,7 +165,7 @@ public:
 
   /**
    * @brief Set fix value for the specified input by creating a Producer wrapping the given Tensor.
-   * 
+   *
    * @param idx Input index.
    * @param tensor Constant Tensor to add as parent for specified index.
    */
@@ -301,9 +301,9 @@ public:
   /**
    * @brief Get the pointer to parent of the specified input index. This pointer is nullptr if no parent is linked.
    * @param inId Input index.
-   * @return std::shared_ptr<Node>& 
+   * @return std::shared_ptr<Node>&
    */
-  inline NodePtr &getParents(const IOIndex_t inId) {
+  inline NodePtr &getParent(const IOIndex_t inId) {
     assert(inId != gk_IODefaultIndex);
     return mParents.at(inId);
   }
@@ -312,7 +312,7 @@ public:
    * @brief Unlink the parent Node at the specified input index and return its pointer.
    * Return a nullptr is no parent was linked.
    * @param inId Input index.
-   * @return std::shared_ptr<Node> 
+   * @return std::shared_ptr<Node>
    */
   NodePtr popParent(const IOIndex_t inId);
 
@@ -331,7 +331,7 @@ public:
   /**
    * @brief Get the list of children Nodes linked to the output at specified index.
    * @param outId Output index.
-   * @return std::vector<std::shared_ptr<Node>> 
+   * @return std::vector<std::shared_ptr<Node>>
    */
   std::vector<NodePtr> getChildren(const IOIndex_t outId) const;
 
@@ -364,8 +364,8 @@ private:
 
   /**
    * @brief Set the idInChildren parameter.
-   * @param inID 
-   * @param newNodeOutID 
+   * @param inID
+   * @param newNodeOutID
    */
   void setInputId(const IOIndex_t inID, const IOIndex_t newNodeOutID);
 
@@ -375,17 +375,17 @@ private:
 
   /**
    * @brief Add the given Node as a child for the current Node.
-   * @param otherNode 
-   * @param outId 
-   * @param otherInId 
+   * @param otherNode
+   * @param outId
+   * @param otherInId
    */
   void addChildOp(NodePtr otherNode, const IOIndex_t outId,
                   const IOIndex_t otherInId);
 
   /**
    * @brief Add the given GraphView's input Node as a child for the current Node
-   * @param otherGraph 
-   * @param outId 
+   * @param otherGraph
+   * @param outId
    * @param otherInId pointer the GraphView's input Node and its input index. Defaults to the
    * only input Node if the GraphView has got one.
    */
@@ -402,4 +402,4 @@ private:
 };
 } // namespace Aidge
 
-#endif /* __AIDGE_CORE_GRAPH_NODE_H__ */
+#endif /* AIDGE_CORE_GRAPH_NODE_H_ */
diff --git a/include/aidge/graph/OpArgs.hpp b/include/aidge/graph/OpArgs.hpp
index dd0cfe1cca8a3f487c18875cff3f90cc56291107..9d1ba6fd1e1df594634bfd93a24663ff178b7ee6 100644
--- a/include/aidge/graph/OpArgs.hpp
+++ b/include/aidge/graph/OpArgs.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_GRAPH_OPARGS_H__
-#define __AIDGE_CORE_GRAPH_OPARGS_H__
+#ifndef AIDGE_CORE_GRAPH_OPARGS_H_
+#define AIDGE_CORE_GRAPH_OPARGS_H_
 
 #include <memory>
 #include <cassert>
@@ -30,7 +30,7 @@ private:
 public:
     OpArgs(const std::shared_ptr<GraphView>& view_)
      : mView(view_) {assert(mView && "The GraphView provided should not be a nullptr.");}
-    
+
     OpArgs(const std::shared_ptr<Node>& node_)
      : mNode(node_) {assert(mNode && "The Node provided should not be a nullptr.");}
 
@@ -55,7 +55,7 @@ public:
  * @param inputs List of Node and GraphView to link sequentially.
  * @return std::shared_ptr<GraphView> Pointer to the generated view.
  */
-std::shared_ptr<GraphView> Sequential(std::initializer_list<OpArgs> inputs);
+std::shared_ptr<GraphView> Sequential(std::vector<OpArgs> inputs);
 
 /////////////////////////////
 // Parallel
@@ -65,7 +65,7 @@ std::shared_ptr<GraphView> Sequential(std::initializer_list<OpArgs> inputs);
  * @param inputs List of Node and GraphView to link sequentially.
  * @return std::shared_ptr<GraphView> pointer to the generated view.
  */
-std::shared_ptr<GraphView> Parallel(std::initializer_list<OpArgs> inputs);
+std::shared_ptr<GraphView> Parallel(std::vector<OpArgs> inputs);
 
 /////////////////////////////
 // Residual
@@ -79,8 +79,8 @@ std::shared_ptr<GraphView> Parallel(std::initializer_list<OpArgs> inputs);
  * @param inputs List of Node and GraphView to link sequentially.
  * @return std::shared_ptr<GraphView> pointer to the generated view.
  */
-std::shared_ptr<GraphView> Residual(std::initializer_list<OpArgs> inputs);
+std::shared_ptr<GraphView> Residual(std::vector<OpArgs> inputs);
 
 }
 
-#endif /* __AIDGE_CORE_GRAPH_OPARGS_H__ */
\ No newline at end of file
+#endif /* AIDGE_CORE_GRAPH_OPARGS_H_ */
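Switching `Sequential`/`Parallel`/`Residual` from `std::initializer_list<OpArgs>` to `std::vector<OpArgs>` means the composition list can now be built at runtime; brace-form calls such as `Sequential({n1, n2})` keep compiling because the braced list converts to a vector. A sketch, assuming the `Conv` factory shown later in this diff and a `ReLU()` factory with the usual optional-name signature:

```cpp
#include "aidge/graph/OpArgs.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/ReLU.hpp"
#include <vector>

std::shared_ptr<Aidge::GraphView> makeBackbone(std::size_t depth) {
    std::vector<Aidge::OpArgs> layers;
    for (std::size_t i = 0; i < depth; ++i) {
        layers.push_back(Aidge::Conv(8, 8, {3, 3})); // 8->8 channels, 3x3 kernel
        layers.push_back(Aidge::ReLU());             // assumed factory signature
    }
    return Aidge::Sequential(layers); // not possible with initializer_list
}
```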
diff --git a/include/aidge/graphmatching/GRegex.hpp b/include/aidge/graphmatching/GRegex.hpp
index 1292b607cee35f50dc0acc5f5113946be103065e..fd2d0c52ab47e0f03b3307bdbcfcb5a7b81d78d9 100644
--- a/include/aidge/graphmatching/GRegex.hpp
+++ b/include/aidge/graphmatching/GRegex.hpp
@@ -10,8 +10,8 @@
  ********************************************************************************/
 
 
-#ifndef __AIDGE_GREGEX_H__
-#define __AIDGE_GREGEX_H__
+#ifndef AIDGE_GREGEX_H_
+#define AIDGE_GREGEX_H_
 
 #include <stdexcept>    // for exception, runtime_error, out_of_range
 #include <regex>
@@ -43,7 +43,7 @@ public:
     bool walk_validation_all_node_read_validate_by_one_stm(const std::vector<std::vector<SeqStm*>> all_stm);
 
     bool walk_validation_common_nodes_same_tag_for_all_stm(const std::vector<std::vector<SeqStm*>> all_stm);
-    
+
     std::set<NodeTmp> get_all_validate_nodes(const std::vector<std::vector<SeqStm*>> all_stm);
 
     std::vector<SeqStm*> getStmInit() const {
@@ -53,11 +53,11 @@ public:
     StmFactory getStmFab() const {
         return mStmFab;
     }
-    
+
     //std::set<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>> match(const std::shared_ptr<GraphView> graphToMatch);
     Match match(const std::shared_ptr<GraphView> graphToMatch);
 
 };
 
 }
-#endif //__AIDGE_GREGEX_H__
\ No newline at end of file
+#endif //AIDGE_GREGEX_H_
\ No newline at end of file
diff --git a/include/aidge/graphmatching/Match.hpp b/include/aidge/graphmatching/Match.hpp
index 27acc2e8a0880f8c62d0ba995fcde5479bdcb501..fc617a22869fde6531fba67c8641581572cbffc4 100644
--- a/include/aidge/graphmatching/Match.hpp
+++ b/include/aidge/graphmatching/Match.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_MATCH_H__
-#define __AIDGE_MATCH_H__
+#ifndef AIDGE_MATCH_H_
+#define AIDGE_MATCH_H_
 
 #include <vector>
 #include <set>
@@ -41,4 +41,4 @@ protected:
 };
 
 }
-#endif //__AIDGE_MATCH_H__
\ No newline at end of file
+#endif //AIDGE_MATCH_H_
\ No newline at end of file
diff --git a/include/aidge/graphmatching/NodeRegex.hpp b/include/aidge/graphmatching/NodeRegex.hpp
index 387bfea46f0147613a116beac1f9c6102ed661e5..10ba7225834e4abfb7f0f5cd45ffa91b22f2f87d 100644
--- a/include/aidge/graphmatching/NodeRegex.hpp
+++ b/include/aidge/graphmatching/NodeRegex.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_NODEREGEX_H__
-#define __AIDGE_NODEREGEX_H__
+#ifndef AIDGE_NODEREGEX_H_
+#define AIDGE_NODEREGEX_H_
 #include <cstdlib>
 #include <iostream>
 #include <cstring>
@@ -27,7 +27,7 @@ class NodeRegex
     NodeRegex(const std::string c){
         mCondition = c;
     };
-    
+
     // Version 1 - Only test the type of the node (no need for a lexer)
     // Input : Node_op
     // Output : bool
@@ -38,4 +38,4 @@ class NodeRegex
 
 }
 
-#endif /* ___AIDGE_NODEREGEX_H___ */
\ No newline at end of file
+#endif /* AIDGE_NODEREGEX_H_ */
\ No newline at end of file
diff --git a/include/aidge/graphmatching/SeqStm.hpp b/include/aidge/graphmatching/SeqStm.hpp
index 6ccd6cfcd322c4d38af2ad04cd2b3a96d839e6cd..0823b5fc0f292d8cf28f7ead53d01bd8dd8adbfe 100755
--- a/include/aidge/graphmatching/SeqStm.hpp
+++ b/include/aidge/graphmatching/SeqStm.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_SEQSTM_H__
-#define __AIDGE_SEQSTM_H__
+#ifndef AIDGE_SEQSTM_H_
+#define AIDGE_SEQSTM_H_
 
 #include <iostream>
 #include <map>
@@ -124,4 +124,4 @@ public:
 };
 } // namespace Aidge
 
-#endif /* __AIDGE_SEQSTM_H__ */
\ No newline at end of file
+#endif /* AIDGE_SEQSTM_H_ */
\ No newline at end of file
diff --git a/include/aidge/graphmatching/StmFactory.hpp b/include/aidge/graphmatching/StmFactory.hpp
index 929fdaf3595038f21367768254040c45b291641b..b5850e4a00691ef6c808554a86a6ceec8c38ad19 100644
--- a/include/aidge/graphmatching/StmFactory.hpp
+++ b/include/aidge/graphmatching/StmFactory.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_STMFACTORY_H__
-#define __AIDGE_STMFACTORY_H__
+#ifndef AIDGE_STMFACTORY_H_
+#define AIDGE_STMFACTORY_H_
 
 #include <map>
 #include <utility>
@@ -52,4 +52,4 @@ private:
 };
 }
 
-#endif //__AIDGE_STMFACTORY_H__
\ No newline at end of file
+#endif //AIDGE_STMFACTORY_H_
\ No newline at end of file
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 36e592682e61fbc178ed4623f88e9fa5f446f25d..ff3d1888c3bc70b61a3d4da42908d40de2d1d73e 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_ADD_H__
-#define __AIDGE_CORE_OPERATOR_ADD_H__
+#ifndef AIDGE_CORE_OPERATOR_ADD_H_
+#define AIDGE_CORE_OPERATOR_ADD_H_
 
 #include <numeric>
 #include <vector>
@@ -93,14 +93,15 @@ public:
         assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
         return *(mInputs[inputIdx].get());
     }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
-    
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
     inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
         return mInputs[inputIdx];
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "Add Operators has only 1 outputs");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
 
@@ -108,8 +109,9 @@ public:
         assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
         return std::static_pointer_cast<Data>(mInputs[inputIdx]);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
 
@@ -139,9 +141,9 @@ public:
 };
 
 template <std::size_t NUM>
-inline std::shared_ptr<Node> Add(const char* name = nullptr) {
+inline std::shared_ptr<Node> Add(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Add_Op<NUM>>(), name);
 }
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_ADD_H__ */
+#endif /* AIDGE_CORE_OPERATOR_ADD_H_ */
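From here on, the diff replaces the GCC/Clang-only `__attribute__((unused))` with two portable idioms, which matters now that a `build:windows_cpp` job exists: a parameter read only inside an `assert` gets an explicit `(void)` cast (the assert compiles away under NDEBUG, which would otherwise leave the parameter unused), and a parameter never read at all gets its name commented out. In isolation:

```cpp
#include <cassert>

// Parameter used only in the assert: cast keeps NDEBUG builds warning-free.
void checkOutput(const int outputIdx) {
    assert(outputIdx == 0 && "operator supports only 1 output");
    (void) outputIdx; // avoid unused warning
}

// Parameter intentionally ignored: comment out its name entirely.
void ignoreOutput(const int /*outputIdx*/) {}
```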
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index a86942d14e531e5974c8924d8dafb8a4d0bebf85..bf76bd45893b43043b81cd6563c500be27c66b42 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_AVGPOOLING_H__
-#define __AIDGE_CORE_OPERATOR_AVGPOOLING_H__
+#ifndef AIDGE_CORE_OPERATOR_AVGPOOLING_H_
+#define AIDGE_CORE_OPERATOR_AVGPOOLING_H_
 
 #include <array>
 #include <numeric>
@@ -46,7 +46,7 @@ public:
     AvgPooling_Op() = delete;
 
     using Parameterizable_ = Parameterizable<AvgPoolingParam,
-                                             std::array<DimSize_t, DIM>, 
+                                             std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, (DIM<<1)> >;
     template <AvgPoolingParam e>
@@ -63,8 +63,9 @@ public:
         setDatatype(DataType::Float32);
     }
 
-    constexpr void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 1 && "operators supports only 3 inputs");
+        (void) inputIdx; // avoid unused warning
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
 
         mInput = std::dynamic_pointer_cast<Tensor>(data);
@@ -76,7 +77,7 @@ public:
 
             for (std::size_t dim = 0; dim < this->template get<AvgPoolingParam::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                                            std::floor(static_cast<float>(mInput->dims()[dim+2] - 
+                                            std::floor(static_cast<float>(mInput->dims()[dim+2] -
                                                                     this->template get<AvgPoolingParam::KernelDims>()[dim] +
                                                                     this->template get<AvgPoolingParam::PaddingDims>()[dim] +
                                                                     this->template get<AvgPoolingParam::PaddingDims>()[dim+DIM]) /
@@ -91,29 +92,34 @@ public:
     bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
 
 
-    inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
         assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
         return *(mInput.get());
     }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
 
-    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx == 0 && "AvgPooling Operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
         return mInput;
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "AvgPooling Operators has only 1 outputs");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
 
 
-    std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mInput);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
 
@@ -140,7 +146,7 @@ public:
 
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
-                                           const char *name = nullptr,
+                                           const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
     // FIXME: properly handle default w&b initialization in every cases
@@ -152,7 +158,7 @@ inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> AvgPooling(
     DimSize_t const (&kernel_dims)[DIM],
-    const char *name = nullptr,
+    const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
@@ -166,4 +172,4 @@ const char *const EnumStrings<Aidge::AvgPoolingParam>::data[] = {"StrideDims",
                                                           "KernelDims", "PaddingDims"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_AVGPOOLING_H__ */
+#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 6c64ae44c04f9a8f37d0dde14b251da94ce72a3f..6861c1359737f3f344f0c7d9b2d12c9ff35b88ad 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_BATCHNORM_H__
-#define __AIDGE_CORE_OPERATOR_BATCHNORM_H__
+#ifndef AIDGE_CORE_OPERATOR_BATCHNORM_H_
+#define AIDGE_CORE_OPERATOR_BATCHNORM_H_
 
 #include <array>
 #include <memory>
@@ -53,7 +53,7 @@ public:
           Parameterizable_(param<BatchNormParam::Epsilon>(epsilon),
                            param<BatchNormParam::Momentum>(momentum)),
           mOutput(std::make_shared<Tensor>()) {
-        setDatatype(DataType::Float32);        
+        setDatatype(DataType::Float32);
     }
 
     // Data operator[](const char* inputName) override final {
@@ -65,7 +65,7 @@ public:
     //     return *in;
     // }
 
-    constexpr void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 5 && "operators supports only 5 inputs");
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
 
@@ -90,15 +90,16 @@ public:
         assert(inputIdx < 5 && "operators supports only 5 inputs");
         return *(mInputs[inputIdx].get()); }
 
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
 
     inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 5 && "BatchNorm Operators supports only 5 inputs");
         return mInputs[inputIdx];
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "BatchNorm Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
 
@@ -107,8 +108,9 @@ public:
         assert(inputIdx < 5 && "operators supports only 5 inputs");
         return std::static_pointer_cast<Data>(mInputs[inputIdx]);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
 
@@ -142,7 +144,7 @@ public:
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> BatchNorm(const float epsilon = 1.0e-5F,
                                        const float momentum = 0.1F,
-                                       const char *name = nullptr) {
+                                       const std::string& name = "") {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by BatchNorm, not supported");
     auto batchNorm = std::make_shared<Node>(std::make_shared<BatchNorm_Op<static_cast<DimIdx_t>(DIM)>>(epsilon, momentum), name);
     addProducer(batchNorm, 1, std::array<DimSize_t,0>({}), "scale");
@@ -158,4 +160,4 @@ template <>
 const char *const EnumStrings<Aidge::BatchNormParam>::data[] = { "Epsilon", "Momentum" };
 }
 
-#endif // __AIDGE_CORE_OPERATOR_BATCHNORM_H__
\ No newline at end of file
+#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index babeac443dd8d51a8b9d3de5a2e96b8745636060..1edc94b96763cc163646037a8bd069023511df67 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_CONV_H__
-#define __AIDGE_CORE_OPERATOR_CONV_H__
+#ifndef AIDGE_CORE_OPERATOR_CONV_H_
+#define AIDGE_CORE_OPERATOR_CONV_H_
 
 #include <array>
 #include <cmath>
@@ -63,7 +63,7 @@ public:
                            param<ConvParam::KernelDims>(kernel_dims),
                            param<ConvParam::PaddingDims>(padding_dims)),
           mOutput(std::make_shared<Tensor>()) {
-        setDatatype(DataType::Float32);        
+        setDatatype(DataType::Float32);
     }
 
     // Data operator[](const char* inputName) override final {
@@ -79,7 +79,7 @@ public:
 
     // }
 
-    constexpr void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
 
@@ -114,15 +114,16 @@ public:
     inline Tensor& input(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         return *(mInputs[inputIdx].get()); }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
 
     inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 3 && "Conv Operators supports only 3 inputs");
         return mInputs[inputIdx];
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "Conv Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
 
@@ -131,8 +132,9 @@ public:
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         return std::static_pointer_cast<Data>(mInputs[inputIdx]);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
 
@@ -161,10 +163,10 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Conv(DimSize_t in_channels, 
+inline std::shared_ptr<Node> Conv(DimSize_t in_channels,
                                   DimSize_t out_channels,
                                   const std::array<DimSize_t, DIM> &kernel_dims,
-                                  const char *name = nullptr,
+                                  const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
                                   const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
@@ -182,7 +184,7 @@ inline std::shared_ptr<Node> Conv(
     DimSize_t in_channels,
     DimSize_t out_channels,
     DimSize_t const (&kernel_dims)[DIM],
-    const char *name = nullptr,
+    const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
@@ -197,4 +199,4 @@ const char *const EnumStrings<Aidge::ConvParam>::data[] = {"StrideDims", "Dilati
                                                           "KernelDims", "PaddingDims"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_CONV_H__ */
+#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 7cbc609798064e993c7744fdf08865d897518a89..95a2ff55b70dbed9299fb3dca98fb9b0e700d210 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H__
-#define __AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H__
+#ifndef AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_
+#define AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_
 
 #include <array>
 #include <cmath>
@@ -49,9 +49,9 @@ class ConvDepthWise_Op : public Operator,
     ConvDepthWise_Op() = delete;
 
     using Parameterizable_ = Parameterizable<ConvDepthWiseParam,
-                                             std::array<DimSize_t, DIM>, 
                                              std::array<DimSize_t, DIM>,
-                                             DimSize_t, 
+                                             std::array<DimSize_t, DIM>,
+                                             DimSize_t,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, (DIM<<1) >>;
     template <ConvDepthWiseParam e>
@@ -62,7 +62,7 @@ class ConvDepthWise_Op : public Operator,
                                const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
         : Operator(Type),
-          Parameterizable_(param<ConvDepthWiseParam::StrideDims>(stride_dims), 
+          Parameterizable_(param<ConvDepthWiseParam::StrideDims>(stride_dims),
                            param<ConvDepthWiseParam::DilationDims>(dilation_dims),
                            param<ConvDepthWiseParam::Channels>(0),
                            param<ConvDepthWiseParam::KernelDims>(kernel_dims),
@@ -71,7 +71,7 @@ class ConvDepthWise_Op : public Operator,
         setDatatype(DataType::Float32);
     }
 
-    constexpr void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
 
@@ -114,15 +114,16 @@ class ConvDepthWise_Op : public Operator,
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         return *(mInputs[inputIdx].get());
     }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
 
     inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 3 && "ConvDepthWise Operators supports only 3 inputs");
         return mInputs[inputIdx];
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "ConvDepthWise Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
 
@@ -130,9 +131,10 @@ class ConvDepthWise_Op : public Operator,
     std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }    
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
 
@@ -163,7 +165,7 @@ class ConvDepthWise_Op : public Operator,
 
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
-                                           const char *name = nullptr,
+                                           const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
                                            const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
@@ -178,7 +180,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &ker
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> ConvDepthWise(
     DimSize_t const (&kernel_dims)[DIM],
-    const char *name = nullptr,
+    const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
@@ -193,4 +195,4 @@ const char *const EnumStrings<Aidge::ConvDepthWiseParam>::data[] = {"StrideDims"
                                                           "KernelDims", "PaddingDims"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H__ */
+#endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
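Note on the unused-parameter changes above: two portable idioms replace the GCC-only __attribute__((unused)). When the argument is never read, the parameter name is simply commented out; when an assert still needs the name (asserts vanish under -DNDEBUG), a (void) cast silences the warning. A self-contained sketch with illustrative function names:

#include <cassert>

// Idiom 1: unnamed parameter -- nothing left to be "unused" on any compiler.
int output(int /*outputIdx*/) { return 42; }

// Idiom 2: (void) cast -- assert() expands to nothing in release builds,
// which would otherwise leave outputIdx unused.
int getOutput(int outputIdx) {
    assert(outputIdx == 0 && "operator supports only 1 output");
    (void) outputIdx; // avoid unused warning
    return 42;
}
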
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index ebd3a8826dbca292b57f4d3cae749f4e1d7968c8..db92dc9c735416d250fa32e2f9010b21b8f808c0 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_FC_H__
-#define __AIDGE_CORE_OPERATOR_FC_H__
+#ifndef AIDGE_CORE_OPERATOR_FC_H_
+#define AIDGE_CORE_OPERATOR_FC_H_
 
 #include <array>
 #include <cmath>
@@ -57,7 +57,7 @@ public:
         setDatatype(DataType::Float32);
     }
 
-    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         if (inputIdx == 2) {
@@ -75,7 +75,7 @@ public:
             std::array<DimSize_t, 2> weightDims = {this->template get<FCParam::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
             // <out_channels, batch>
             std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template get<FCParam::OutChannels>()};
-            
+
             mInputs[1]->resize(weightDims);
             mOutput->resize(outputDims);
         }
@@ -89,15 +89,16 @@ public:
     inline Tensor& input(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         return *(mInputs[inputIdx].get()); }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t inputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& output(const IOIndex_t /*inputIdx*/) const override final { return *(mOutput.get()); }
 
 
     inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 3 && "FC Operators supports only 3 inputs");
         return mInputs[inputIdx];
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "FC Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
 
@@ -106,8 +107,9 @@ public:
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         return std::static_pointer_cast<Data>(mInputs[inputIdx]);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
 
@@ -137,7 +139,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
-inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const char* name = nullptr) {
+inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name);
     addProducer(fc, 1, {out_channels, 1}, "w");
@@ -152,4 +154,4 @@ const char *const EnumStrings<Aidge::FCParam>::data[] = {"OutChannels",
                                                         "NoBias"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_FC_H__ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 86b96bfaa8bf0eb5ab52fa542f169708ff8d09ca..dab5df9a8f2d1e7d2cd680703d70e38d564c2564 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_GENERICOPERATOR_H__
-#define __AIDGE_CORE_OPERATOR_GENERICOPERATOR_H__
+#ifndef AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_
+#define AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_
 
 #include <memory>
 #include <vector>
@@ -85,7 +85,7 @@ class GenericOperator_Op
     std::vector<std::string> getParametersName() { return mParams.getParametersName(); }
 
     // Override Virtual Opertor methods
-    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, __attribute__((unused)) std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final {
         printf("Info: using associateInput() on a GenericOperator.\n");
     }
 
@@ -158,9 +158,9 @@ class GenericOperator_Op
  * @return std::shared_ptr<Node> Node associated with the Generic Operator.
  */
 inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut,
-                                             const char *name = nullptr) {
+                                             const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbDataIn, nbIn, nbOut), name);
 }
 }  // namespace Aidge
 
-#endif /* __AIDGE_CORE_OPERATOR_GENERICOPERATOR_H__ */
+#endif /* AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index ed967001a23a6b9dd4cfe5db09ec4f1edd60e5ea..1dff2550a42245351afab5b8bb1a708a8d0d8c0b 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_LEAKYRELU_H__
-#define __AIDGE_CORE_OPERATOR_LEAKYRELU_H__
+#ifndef AIDGE_CORE_OPERATOR_LEAKYRELU_H_
+#define AIDGE_CORE_OPERATOR_LEAKYRELU_H_
 
 #include <vector>
 #include <memory>
@@ -53,8 +53,9 @@ public:
         setDatatype(DataType::Float32);
     }
 
-    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         mInput = std::dynamic_pointer_cast<Tensor>(data);
     }
@@ -69,26 +70,30 @@ public:
     }
 
 
-    inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final { return *(mInput.get()); }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
 
-    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { 
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert((inputIdx == 0) && "LeakyReLU Operator has only 1 input");
+        (void) inputIdx; // avoid unused warning
         return mInput;
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "LeakyReLU Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
 
 
-    std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mInput);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
 
@@ -112,7 +117,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
-inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const char* name = nullptr) {
+inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
 }
@@ -124,4 +129,4 @@ const char* const EnumStrings<Aidge::LeakyReLUParam>::data[]
     = {"NegativeSlope"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
+#endif /* AIDGE_CORE_OPERATOR_LEAKYRELU_H_ */
diff --git a/include/aidge/operator/Matmul.hpp b/include/aidge/operator/Matmul.hpp
index a871fe516c95802fdb67e81ca3f58fb3be4dce25..639b366912060b3e085510f312d94568e6b65f03 100644
--- a/include/aidge/operator/Matmul.hpp
+++ b/include/aidge/operator/Matmul.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_MATMUL_H__
-#define __AIDGE_CORE_OPERATOR_MATMUL_H__
+#ifndef AIDGE_CORE_OPERATOR_MATMUL_H_
+#define AIDGE_CORE_OPERATOR_MATMUL_H_
 
 #include <array>
 #include <cmath>
@@ -55,7 +55,7 @@ public:
         setDatatype(DataType::Float32);
     }
 
-    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx < 2 && "operators supports only 2 inputs");
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
@@ -67,7 +67,7 @@ public:
             std::array<DimSize_t, 2> weightDims = {static_cast<DimSize_t>(mInputs[0]->size()), this->template get<MatmulParam::OutChannels>()};
             // <out_channels, batch>
             std::array<DimSize_t, 1> outputDims = {this->template get<MatmulParam::OutChannels>()};
-            
+
             mInputs[1]->resize(weightDims);
             mOutput->resize(outputDims);
         }
@@ -81,15 +81,16 @@ public:
     inline Tensor& input(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 2 && "operators supports only 2 inputs");
         return *(mInputs[inputIdx].get()); }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
 
     inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 2 && "MatMul Operators has 2 inputs");
         return mInputs[inputIdx];
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "MatMul Operators has 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
 
@@ -98,8 +99,9 @@ public:
         assert(inputIdx < 2 && "operators supports only 2 inputs");
         return std::static_pointer_cast<Data>(mInputs[inputIdx]);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
 
@@ -127,7 +129,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
-inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const char* name = nullptr) {
+inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     auto matmul = std::make_shared<Node>(std::make_shared<Matmul_Op>(out_channels), name);
     addProducer(matmul, 1, {1, out_channels}, "w");
@@ -140,4 +142,4 @@ template <>
 const char *const EnumStrings<Aidge::MatmulParam>::data[] = {"OutChannels"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR__MATMUL_H__ */
+#endif /* AIDGE_CORE_OPERATOR_MATMUL_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..073243e801c6e1297129424b0c93b1a7c4f112f3
--- /dev/null
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -0,0 +1,174 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_MAXPOOLING_H_
+#define AIDGE_CORE_OPERATOR_MAXPOOLING_H_
+
+#include <array>
+#include <numeric>
+#include <vector>
+#include <cmath>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class MaxPoolingParam { StrideDims, KernelDims, PaddingDims };
+
+template <DimIdx_t DIM>
+class MaxPooling_Op : public Operator,
+                public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
+                public Parameterizable<MaxPoolingParam,
+                                       std::array<DimSize_t, DIM>,
+                                       std::array<DimSize_t, DIM>,
+                                       std::array<DimSize_t, (DIM<<1) >> {
+private:
+    // FIXME: change accessibility
+    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
+    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>();
+
+public:
+    static constexpr const char *Type = "MaxPooling";
+
+    MaxPooling_Op() = delete;
+
+    using Parameterizable_ = Parameterizable<MaxPoolingParam,
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, DIM>,
+                                             std::array<DimSize_t, (DIM<<1)> >;
+    template <MaxPoolingParam e>
+    using param = typename Parameterizable_::template param<e>;
+
+    constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims,
+                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0))
+        : Operator(Type),
+          Parameterizable_(param<MaxPoolingParam::StrideDims>(stride_dims),
+                           param<MaxPoolingParam::KernelDims>(kernel_dims),
+                           param<MaxPoolingParam::PaddingDims>(padding_dims)),
+          mOutput(std::make_shared<Tensor>()) {
+        setDatatype(DataType::Float32);
+    }
+
+    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    assert(inputIdx < 1 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
+        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type");
+
+        mInput = std::dynamic_pointer_cast<Tensor>(data);
+    }
+
+    constexpr void computeOutputDims() override final {
+        if (!mInput->empty()) {
+            std::array<DimSize_t, DIM + 2> outputDims = {};
+
+            for (std::size_t dim = 0; dim < this->template get<MaxPoolingParam::KernelDims>().size() ; ++dim) {
+                outputDims[dim+2] = 1 + static_cast<DimSize_t>(
+                                            std::floor(static_cast<float>(mInput->dims()[dim+2] -
+                                                                    this->template get<MaxPoolingParam::KernelDims>()[dim] +
+                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim] +
+                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim+DIM]) /
+                                            static_cast<float>(this->template get<MaxPoolingParam::StrideDims>()[dim])));
+            }
+            outputDims[1] = mInput->dims()[1];
+            outputDims[0] = mInput->dims()[0];
+            mOutput->resize(outputDims);
+        }
+    }
+
+    bool outputDimsForwarded() const override final { return !(mOutput->empty()); }
+
+
+    inline Tensor& input(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return *(mInput.get());
+    }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
+
+
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "MaxPooling Operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return mInput;
+    }
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "MaxPooling Operators has only 1 outputs");
+        (void) outputIdx; // avoid unused warning
+        return mOutput;
+    }
+
+
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
+        assert(inputIdx == 0 && "operators supports only 1 inputs");
+        (void) inputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mInput);
+    }
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
+        assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
+        return std::static_pointer_cast<Data>(mOutput);
+    }
+
+
+    void setBackend(const std::string &name) {
+        mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
+        mOutput->setBackend(name);
+
+        // FIXME: temporary workaround
+        mInput->setBackend(name);
+    }
+
+    void setDatatype(const DataType &datatype) {
+        mOutput->setDatatype(datatype);
+
+        // FIXME: temporary workaround
+        mInput->setDatatype(datatype);
+    }
+
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims,
+                                           const std::string& name = "",
+                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+                                           const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+    // FIXME: properly handle default w&b initialization in every cases
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
+    auto maxPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name);
+    return maxPool;
+}
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> MaxPooling(
+    DimSize_t const (&kernel_dims)[DIM],
+    const std::string& name = "",
+    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
+    return MaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims);
+}
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::MaxPoolingParam>::data[] = {"StrideDims", "KernelDims", "PaddingDims"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
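For reference, computeOutputDims() above applies the usual floor-mode pooling formula per spatial dimension, out = 1 + floor((in - kernel + pad_begin + pad_end) / stride), while batch (dim 0) and channel (dim 1) pass through unchanged. A stand-alone sketch of the same arithmetic (pooledDim is an illustrative name, not Aidge API):

#include <cmath>
#include <cstddef>
#include <iostream>

// One spatial dimension of MaxPooling_Op::computeOutputDims().
std::size_t pooledDim(std::size_t in, std::size_t kernel, std::size_t stride,
                      std::size_t padBegin, std::size_t padEnd) {
    return 1 + static_cast<std::size_t>(
                   std::floor(static_cast<float>(in - kernel + padBegin + padEnd) /
                              static_cast<float>(stride)));
}

int main() {
    // 32x32 input, 2x2 kernel, stride 2, no padding -> 16x16 output.
    std::cout << pooledDim(32, 2, 2, 0, 0) << '\n'; // prints 16
}
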
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 7fa1a20449d055da9cd25e6dc4f987757aca3f4a..35a59b56cbf5c10a78116f81de96a8baddc03ff0 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_METAOPERATOR_H__
-#define __AIDGE_CORE_OPERATOR_METAOPERATOR_H__
+#ifndef AIDGE_CORE_OPERATOR_METAOPERATOR_H_
+#define AIDGE_CORE_OPERATOR_METAOPERATOR_H_
 
 #include "aidge/operator/Operator.hpp"
 
@@ -25,4 +25,4 @@ public:
 };
 }
 
-#endif /* MetaOperator_H__ */
+#endif /* AIDGE_CORE_OPERATOR_METAOPERATOR_H_ */
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 3dc25f2dc9e3888adfbe557d057e23a3f08a414e..5e0289541a93df3020467667bafcb3eb9248977f 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_OPERATOR_H__
-#define __AIDGE_CORE_OPERATOR_OPERATOR_H__
+#ifndef AIDGE_CORE_OPERATOR_OPERATOR_H_
+#define AIDGE_CORE_OPERATOR_OPERATOR_H_
 
 #include <memory>
 #include <string>
@@ -69,26 +69,28 @@ public:
     /**
      * @brief Minimum amount of data from a specific input for one computation pass.
      * @param inputIdx Index of the input analysed.
-     * @return NbElts_t 
+     * @return NbElts_t
      */
     NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const;
 
     /**
      * @brief Amount of data from a specific input actually used in one computation pass.
-     * 
+     *
      * @param inputIdx Index of the input analysed.
-     * @return NbElts_t 
+     * @return NbElts_t
      */
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const;
 
     /**
      * @brief Amount of data ready to be used on a specific output.
-     * 
+     *
      * @param outputIdx Index of the output analysed.
-     * @return NbElts_t 
+     * @return NbElts_t
      */
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const;
 
+    void updateConsummerProducer();
+
     virtual void forward();
 
     virtual void backward();
@@ -107,4 +109,4 @@ public:
 };
 } // namespace Aidge
 
-#endif /* __AIDGE_CORE_OPERATOR_OPERATOR_H__ */
+#endif /* AIDGE_CORE_OPERATOR_OPERATOR_H_ */
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 4d5461957826e9ebea4a39bb9a7618604e80797a..acdc69b69ab86b25a11d889980b9236e41928316 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_PRODUCER_H__
-#define __AIDGE_CORE_OPERATOR_PRODUCER_H__
+#ifndef AIDGE_CORE_OPERATOR_PRODUCER_H_
+#define AIDGE_CORE_OPERATOR_PRODUCER_H_
 
 #include <array>
 #include <vector>
@@ -51,39 +51,41 @@ public:
         setDatatype(tensor->dataType());
     }
 
-    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, __attribute__((unused)) std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t /*inputIdx*/, std::shared_ptr<Data> /*data*/) override final {
         assert(false && "Producer operator takes no input");
     }
 
-    constexpr void computeOutputDims() override final {}
+    void computeOutputDims() override final {}
 
-    constexpr bool outputDimsForwarded() const override final {return true;}
+    bool outputDimsForwarded() const override final {return true;}
 
 
-    [[noreturn]] inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    [[noreturn]] inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final {
       assert(false);
       exit(-1);
     }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
 
-    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t /*inputIdx*/) const override final {
       assert(false && "Producer Operator has no input");
       return nullptr;
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
       assert((outputIdx == 0) && "Producer Operator has only 1 output");
+      (void) outputIdx; // avoid unused warning
       return mOutput;
     }
 
 
-    std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    std::shared_ptr<Data> getRawInput(const IOIndex_t /*inputIdx*/) const override final {
         assert(false && "Producer operator takes no input");
         return nullptr;
     }
 
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
 
@@ -111,34 +113,34 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const char *name = nullptr) {
+inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "") {
   static_assert(DIM<=MaxDim,"Too many tensor dimensions required by Producer, not supported");
   return std::make_shared<Node>(std::make_shared<Producer_Op>(dims), name);
 }
 
 template <std::size_t DIM>
-inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const char *name = nullptr) {
+inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const std::string& name = "") {
   return Producer(to_array(dims), name);
 }
 
-inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const char *name = nullptr) {
+inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "") {
   return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor), name);
 }
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const char* extension) {
+void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const std::string& extension) {
     assert(inputIdx != gk_IODefaultIndex);
     static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
-    const char* prodName = otherNode->name().empty() ? nullptr : (otherNode->name() + std::string("_") + std::string(extension)).c_str();
+    const std::string prodName = (otherNode->name().empty()) ? "" : (otherNode->name() + std::string("_") + extension);
     auto prod = Producer(dims, prodName);
     prod->addChild(otherNode, 0, inputIdx);
     otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
 }
 
 template <std::size_t DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const char* extension) {
+void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) {
     addProducer(otherNode, inputIdx, to_array(dims), extension);
 }
 } // namespace Aidge
 
-#endif /* __AIDGE_CORE_OPERATOR_PRODUCER_H__ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
\ No newline at end of file
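Note that the prodName change in addProducer() is a lifetime fix, not just an API migration: the old code took .c_str() from a temporary std::string, which is destroyed at the end of the full expression, so prodName dangled before Producer(dims, prodName) ever ran. A minimal reproduction of the hazard (illustrative names only):

#include <string>

std::string makeName() { return "conv1_w"; }

void example() {
    // BAD: the temporary returned by makeName() dies at the ';',
    // leaving 'dangling' pointing at freed storage.
    const char* dangling = makeName().c_str();
    (void) dangling;

    // GOOD: a named std::string keeps the buffer alive as long as needed.
    const std::string name = makeName();
    (void) name;
}
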
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 93bc9a74091c2893dc7b1f7fcc34c72828f34f27..141bd3ae12c7875a90d2549a24e5c141f3ff6aba 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_RELU_H__
-#define __AIDGE_CORE_OPERATOR_RELU_H__
+#ifndef AIDGE_CORE_OPERATOR_RELU_H_
+#define AIDGE_CORE_OPERATOR_RELU_H_
 
 #include <cassert>
 #include <memory>
@@ -42,8 +42,9 @@ public:
         setDatatype(DataType::Float32);
     }
 
-    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         mInput = std::dynamic_pointer_cast<Tensor>(data);
     }
@@ -58,26 +59,30 @@ public:
     }
 
 
-    inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final { return *(mInput.get()); }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
 
-    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { 
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert((inputIdx == 0) && "ReLU Operator has only 1 input");
+        (void) inputIdx; // avoid unused warning
         return mInput;
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "ReLU Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
 
 
-    std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mInput);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
 
@@ -101,10 +106,10 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
-inline std::shared_ptr<Node> ReLU(const char* name = nullptr) {
+inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
 }
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
+#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 9be2acde8570bdc250054e9bed7a1b0d5c3e52ff..64e713b331bbbbf612ee5102ba0ea82fb108350e 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_SOFTMAX_H__
-#define __AIDGE_CORE_OPERATOR_SOFTMAX_H__
+#ifndef AIDGE_CORE_OPERATOR_SOFTMAX_H_
+#define AIDGE_CORE_OPERATOR_SOFTMAX_H_
 
 #include <cassert>
 #include <memory>
@@ -42,8 +42,9 @@ public:
         setDatatype(DataType::Float32);
     }
 
-    void associateInput(__attribute__((unused)) const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
+    void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final {
         assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
         assert(strcmp(data->type(), Tensor::Type)==0 && "input data must be of Tensor type");
         mInput = std::dynamic_pointer_cast<Tensor>(data);
     }
@@ -58,26 +59,30 @@ public:
     }
 
 
-    inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final { return *(mInput.get()); }
-    inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
+    inline Tensor& input(const IOIndex_t /*inputIdx*/) const override final { return *(mInput.get()); }
+    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); }
 
 
-    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { 
+    inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert((inputIdx == 0) && "Softmax Operator has only 1 input");
+        (void) inputIdx; // avoid unused warning
         return mInput;
     }
-    inline std::shared_ptr<Tensor> getOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final {
         assert((outputIdx == 0) && "Softmax Operator has only 1 output");
+        (void) outputIdx; // avoid unused warning
         return mOutput;
     }
 
 
-    std::shared_ptr<Data> getRawInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx == 0 && "operator supports only 1 input");
+        (void) inputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mInput);
     }
-    std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
+    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
+        (void) outputIdx; // avoid unused warning
         return std::static_pointer_cast<Data>(mOutput);
     }
 
@@ -101,10 +106,10 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
-inline std::shared_ptr<Node> Softmax(const char* name = nullptr) {
+inline std::shared_ptr<Node> Softmax(const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     return std::make_shared<Node>(std::make_shared<Softmax_Op>(), name);
 }
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_SOFTMAX_H__ */
+#endif /* AIDGE_CORE_OPERATOR_SOFTMAX_H_ */
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 2abe90e111c0997928d270b149a6ab4a460eb3aa..9916ee2004bd1aa9f33acf96d95cae4703f692df 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_SCHEDULER_H__
-#define __AIDGE_SCHEDULER_H__
+#ifndef AIDGE_SCHEDULER_H_
+#define AIDGE_SCHEDULER_H_
 
 #include <chrono>
 #include <memory>
@@ -43,6 +43,8 @@ public:
     };
     ~SequentialScheduler() = default;
 
+    void generateScheduling(bool verbose = false);
+
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
@@ -54,18 +56,45 @@ public:
      */
     void saveSchedulingDiagram(const std::string& fileName) const;
 
+    /**
+     * @brief Return a vector of Nodes in the order in which the scheduler calls them
+     *
+     * @return std::vector<std::shared_ptr<Node>>
+     */
+    std::vector<std::shared_ptr<Node>> getStaticScheduling(){
+        return mStaticSchedule;
+    }
+
 private:
     /**
      * @brief Set of layers receiving an input from currently processing layers
-     * 
+     *
      * @param producers Set of layers ready to run.
-     * @return std::set<std::shared_ptr<Node>> 
+     * @return std::set<std::shared_ptr<Node>>
      */
     std::set<std::shared_ptr<Node>> getConsumers(const std::set<std::shared_ptr<Node>>& producers) const;
 
+    /**
+     * @brief Shared ptr to the scheduled graph view
+     *
+     */
     std::shared_ptr<GraphView> mGraphView;
+    /**
+     * @brief List of SchedulingElement (i.e. Nodes with their computation time)
+     *
+     */
     std::vector<SchedulingElement> mScheduling;
+    /**
+     * @brief List of nodes in their static (pre-computed) execution order
+     *
+     */
+    std::vector<std::shared_ptr<Node>> mStaticSchedule;
+    /**
+     * @brief Number of computation nodes (i.e. nodes that are not Producers)
+     *
+     */
+    std::size_t mComputationNumber = 0; // TODO: Check if not inferable from mStaticSchedule
 };
 } // namespace Aidge
 
-#endif /* __AIDGE_SCHEDULER_H__ */
\ No newline at end of file
+#endif /* AIDGE_SCHEDULER_H_ */
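Together, generateScheduling() and getStaticScheduling() let callers inspect the execution order before running anything. A hedged usage sketch, assuming the scheduler is constructed from a GraphView as in the class's existing constructor (inspectSchedule is an illustrative name):

#include <cstdio>
#include <memory>

#include "aidge/scheduler/Scheduler.hpp"

void inspectSchedule(std::shared_ptr<Aidge::GraphView> graph) {
    Aidge::SequentialScheduler scheduler(graph);
    scheduler.generateScheduling(/*verbose=*/false);

    // Nodes in the exact order the scheduler will call them.
    for (const auto& node : scheduler.getStaticScheduling()) {
        printf("%s\n", node->name().c_str());
    }
}
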
diff --git a/include/aidge/utils/CParameter.hpp b/include/aidge/utils/CParameter.hpp
index 64943ff58eae9a06fe50afb1b81deea1b66e90ea..0f4c74ab8bccb7bc134e035a5f12d31d51663e5d 100644
--- a/include/aidge/utils/CParameter.hpp
+++ b/include/aidge/utils/CParameter.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CPARAMETER_H__
-#define __AIDGE_CPARAMETER_H__
+#ifndef AIDGE_CPARAMETER_H_
+#define AIDGE_CPARAMETER_H_
 
 #include <assert.h>
 #include <map>
@@ -112,4 +112,4 @@ private:
 
 }
 
-#endif /* __AIDGE_CPARAMETER_H__ */
+#endif /* AIDGE_CPARAMETER_H_ */
diff --git a/include/aidge/utils/Parameter.hpp b/include/aidge/utils/Parameter.hpp
index 6a8fcca41ff03951eeac80493cd9f86a2ea3586b..b0c6e35950187f17d991cfe5b2c9bd2b09f1e70f 100644
--- a/include/aidge/utils/Parameter.hpp
+++ b/include/aidge/utils/Parameter.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_UTILS_PARAMETER_H__
-#define __AIDGE_CORE_UTILS_PARAMETER_H__
+#ifndef AIDGE_CORE_UTILS_PARAMETER_H_
+#define AIDGE_CORE_UTILS_PARAMETER_H_
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
@@ -40,23 +40,23 @@ constexpr std::size_t size(T (&)[N]) { return N; }
 #ifdef PYBIND
 /* This abstract class allows to avoid binding Parametrizable.
 *  Otherwise we would need to bind every template possible of Parametrizable.
-*  Every operators can access the methods of this class by inheriting from 
+*  Every operator can access the methods of this class by inheriting from
 *  PyAbstractParametrizable in the binding code.
 */
-class PyAbstractParametrizable{ 
+class PyAbstractParametrizable{
     public:
         /* Bindable get function, does not recquire any templating.
         *  This is thanks to py::object which allow the function to
         *  be agnostic from its return type.
         */
         virtual py::object getPy(const char* /*name*/) = 0;
-}; 
+};
 #endif
 
 template <class PARAM_ENUM, class ...T>
 class Parameterizable
 #ifdef PYBIND
-    : public PyAbstractParametrizable 
+    : public PyAbstractParametrizable
 #endif
     {
 public:
@@ -99,7 +99,7 @@ public:
     constexpr typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() {
         return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
     }
-    
+
     template <PARAM_ENUM paramEnum>
     constexpr const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() const {
         return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
@@ -194,4 +194,4 @@ private:
 };
 }
 
-#endif /* AIDGE_CORE_UTILS_PARAMETER_H__ */
+#endif /* AIDGE_CORE_UTILS_PARAMETER_H_ */
diff --git a/include/aidge/utils/Recipies.hpp b/include/aidge/utils/Recipies.hpp
index d6104c56ce288d260ac78c5eb9d1e83d75ca34c8..4cbf8fd284bef314dbe28b19ebdae05172467bad 100644
--- a/include/aidge/utils/Recipies.hpp
+++ b/include/aidge/utils/Recipies.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_UTILS_RECIPIES_H__
-#define __AIDGE_CORE_UTILS_RECIPIES_H__
+#ifndef AIDGE_CORE_UTILS_RECIPIES_H_
+#define AIDGE_CORE_UTILS_RECIPIES_H_
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
@@ -24,4 +24,4 @@ void removeFlatten(std::set<std::shared_ptr<Node>> nodes);
 }
 
 
-#endif /* __AIDGE_CORE_UTILS_RECIPIES_H__ */
\ No newline at end of file
+#endif /* AIDGE_CORE_UTILS_RECIPIES_H_ */
\ No newline at end of file
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 8348eb98d3f3ab4da0873c8b3f4a476a9f8e1afc..98749c1349bad644dee2c1a8549559939791f71c 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_UTILS_REGISTRAR_H__
-#define __AIDGE_CORE_UTILS_REGISTRAR_H__
+#ifndef AIDGE_CORE_UTILS_REGISTRAR_H_
+#define AIDGE_CORE_UTILS_REGISTRAR_H_
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
@@ -68,8 +68,8 @@ struct Registrar {
         for(auto keyValue : C::registry())
             keys.push_back(keyValue.first);
         return keys;
-    }    
+    }
 };
 }
 
-#endif // __AIDGE_CORE_UTILS_REGISTRAR_H__
\ No newline at end of file
+#endif // AIDGE_CORE_UTILS_REGISTRAR_H_
\ No newline at end of file
diff --git a/include/aidge/utils/Types.h b/include/aidge/utils/Types.h
index d05c64ead0e147a8d66c7f40dbd978283401683a..d65279f1f4d36498ea7653428332690fc99a5def 100644
--- a/include/aidge/utils/Types.h
+++ b/include/aidge/utils/Types.h
@@ -10,8 +10,8 @@
  ********************************************************************************/
 
 
-#ifndef __AIDGE_TYPES_H__
-#define __AIDGE_TYPES_H__
+#ifndef AIDGE_TYPES_H_
+#define AIDGE_TYPES_H_
 
 #include <limits>
 #include <type_traits>
@@ -59,4 +59,4 @@ constexpr IOIndex_t gk_IOMaxIndex = std::numeric_limits<IOIndex_t>::max() - 1;
 
 } // namespace Aidge
 
-#endif // __AIDGE_TYPES_H__
\ No newline at end of file
+#endif // AIDGE_TYPES_H_
\ No newline at end of file
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 3f741946da59a118b023f0204da4f42231c1416d..d6442723ecc79527e8eaa7d3e03a466c085dfa58 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -98,9 +98,9 @@ void init_Tensor(py::module& m){
     .def_buffer([](Tensor& b) -> py::buffer_info {
         const std::unique_ptr<TensorImpl>& tensorImpl = b.getImpl();
 
-        std::vector<ssize_t> dims;
-        std::vector<ssize_t> strides;
-        ssize_t stride = tensorImpl->scalarSize();
+        std::vector<size_t> dims;
+        std::vector<size_t> strides;
+        size_t stride = tensorImpl->scalarSize();
 
         for (unsigned int dim = b.nbDims(); dim > 0; dim--) {
             dims.push_back(b.dims()[dim-1]);
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index 62b86982053d82bef6e0fd80e490632b95b968e5..e3666d247324fc419570611f41bbe67c7c68cc4e 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -136,6 +136,16 @@ void init_Node(py::module& m) {
             :rtype: int
             )mydelimiter")
 
+            .def("get_parents", &Node::getParents,
+            R"mydelimiter(
+            Get parents.
+            )mydelimiter")
+
+            .def("get_children", (std::set<std::shared_ptr<Node>> (Node::*)() const) &Node::getChildren,
+            R"mydelimiter(
+            Get children.
+            )mydelimiter")
+
             .def("__call__", &Node::operator(), py::arg("connectors"));
 }
 }  // namespace Aidge
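The function-pointer cast in the get_children binding is how pybind11 disambiguates overloaded member functions: Node::getChildren evidently has more than one overload, so a bare &Node::getChildren would be ambiguous. The same disambiguation in isolation (Widget is an illustrative class, not Aidge API):

#include <set>

struct Widget {
    std::set<int> getChildren() const { return {}; }
    std::set<int> getChildren(int outputIdx) const { return {outputIdx}; }
};

int main() {
    // Select the zero-argument const overload explicitly.
    auto fn = static_cast<std::set<int> (Widget::*)() const>(&Widget::getChildren);
    Widget w;
    (void)(w.*fn)(); // call through the member-function pointer
}
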
diff --git a/python_binding/graph/pybind_OpArgs.cpp b/python_binding/graph/pybind_OpArgs.cpp
index 305c0b73101a97c242413ff84a5ae099764e7e77..6ea89f91945ac44f2142c5b9e8440b11ec6a1663 100644
--- a/python_binding/graph/pybind_OpArgs.cpp
+++ b/python_binding/graph/pybind_OpArgs.cpp
@@ -10,19 +10,20 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
-#include <pybind11/stl.h>
-#include <pybind11/complex.h>
-#include <pybind11/functional.h>
-#include <pybind11/chrono.h>
+
 
 
 namespace py = pybind11;
 namespace Aidge {
 void init_OpArgs(py::module& m){
     py::class_<OpArgs, std::shared_ptr<OpArgs>>(m, "OpArgs")
+    .def(py::init<const std::shared_ptr<GraphView>&>(), py::arg("view_"))
+    .def(py::init<const std::shared_ptr<Node>&>(), py::arg("node_"))
     .def("node", &OpArgs::node)
     .def("view", &OpArgs::view)
     ;
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index d7099e3856d48262f0f4bbacf025f5a960a220fa..3efcf7c5345bbc835aeaf6dcbc416769b8654439 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -23,7 +23,7 @@ namespace Aidge {
 template <std::size_t NUM> void declare_Add(py::module &m) {
   py::class_<Add_Op<NUM>, std::shared_ptr<Add_Op<NUM>>, Operator>(m, "Add_Op", py::multiple_inheritance());
 
-  m.def("Add", &Add<NUM>, py::arg("name") = nullptr);
+  m.def("Add", &Add<NUM>, py::arg("name") = "");
 }
 
 void init_Add(py::module &m) {
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 66dadba7244a199bd4ca8a0dd814f20a8049a62f..ecbb743d33cc5750bc60aeed8e5207dcec0c23dc 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -37,10 +37,10 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
         py::arg("stride_dims"),
         py::arg("padding_dims"));
   
-  m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](std::vector<DimSize_t>& kernel_dims, 
-                                                                  const char* name,
-                                                                  std::vector<DimSize_t> &stride_dims,
-                                                                  std::vector<DimSize_t> &padding_dims) {
+  m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
+                                                                  const std::string& name,
+                                                                  const std::vector<DimSize_t> &stride_dims,
+                                                                  const std::vector<DimSize_t> &padding_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
         // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
         if (kernel_dims.size() != DIM) {
@@ -69,7 +69,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
         const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
         return AvgPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
     }, py::arg("kernel_dims"),
-       py::arg("name") = nullptr,
+       py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
   
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 52578c55ac0e3e1112bdbedc15bbaa3e155d9b44..70d9bce003033e1264ac39764271773fa84c760f 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -24,7 +24,7 @@ template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
     py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, PyAbstractParametrizable>(m, ("BatchNorm_Op" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance());
 
-    m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = nullptr);
+    m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
 
 void init_BatchNorm(py::module &m) {
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 3cf5d818f9b6e3bdfaf9a2d0b74ec0480beb6967..7e366305f287e958ea7500695c1f3285908017b1 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -44,11 +44,11 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
   
   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
                                                          DimSize_t out_channels,
-                                                         std::vector<DimSize_t>& kernel_dims,
-                                                         const char* name, 
-                                                         std::vector<DimSize_t> &stride_dims,
-                                                         std::vector<DimSize_t> &padding_dims,
-                                                         std::vector<DimSize_t> &dilation_dims) {
+                                                         const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::string& name, 
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &padding_dims,
+                                                         const std::vector<DimSize_t> &dilation_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
         // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
         if (kernel_dims.size() != DIM) {
@@ -87,7 +87,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
     }, py::arg("in_channels"),
        py::arg("out_channels"),
        py::arg("kernel_dims"),
-       py::arg("name") = nullptr,
+       py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index b64409bdbb5f094e85cb094017a6fb837893a2db..8a81e7ba184536cbd535db24519495400bce6fdb 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -39,11 +39,11 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         py::arg("padding_dims"),
         py::arg("dilation_dims"));
   
-  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](std::vector<DimSize_t>& kernel_dims, 
-                                                                  const char* name,
-                                                                  std::vector<DimSize_t> &stride_dims,
-                                                                  std::vector<DimSize_t> &padding_dims,
-                                                                  std::vector<DimSize_t> &dilation_dims) {
+  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
+                                                                  const std::string& name,
+                                                                  const std::vector<DimSize_t> &stride_dims,
+                                                                  const std::vector<DimSize_t> &padding_dims,
+                                                                  const std::vector<DimSize_t> &dilation_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
         // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
         if (kernel_dims.size() != DIM) {
@@ -80,7 +80,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         const DimSize_t (&dilation_dims_array)[DIM] = tmp_dilation_dims_array;
         return ConvDepthWise<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array), to_array(dilation_dims_array));
     }, py::arg("kernel_dims"),
-       py::arg("name") = nullptr,
+       py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 82eaa0062b7db0e57da3d78d56e503e3a4beb19f..3b4137c6f208f96d256c72300437cc978658b84f 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -23,7 +23,7 @@ namespace Aidge {
 void declare_FC(py::module &m) {
   py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, PyAbstractParametrizable>(m, "FC_Op", py::multiple_inheritance());
 
-  m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = nullptr);
+  m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
 
 void init_FC(py::module &m) {
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 578d2ccd2ed143c3f9a67c0430c12aa7214cb8dc..bec59eaf2cecdc7f64d1da07580116c4b3334992 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -22,7 +22,7 @@ namespace Aidge {
 void init_GenericOperator(py::module& m) {
     py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, Operator>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance())
-            .def("get_parameter_type", &GenericOperator_Op::getParameterType)
+    .def("get_parameter_type", &GenericOperator_Op::getParameterType)
     .def("get_parameters_name", &GenericOperator_Op::getParametersName)
     .def("add_parameter", &GenericOperator_Op::addParameter<bool>)
     .def("add_parameter", &GenericOperator_Op::addParameter<int>)
@@ -34,10 +34,10 @@ void init_GenericOperator(py::module& m) {
     .def("add_parameter", &GenericOperator_Op::addParameter<std::vector<std::string>>)
     .def("get_parameter", [](GenericOperator_Op& self, std::string key) -> py::object {
         /*
-        This getParameter method returns the good python type without having to have 
+        This getParameter method returns the correct Python type without requiring
         prior knowledge of the parameter type.
         */
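+        // Dispatch on the stored typeid name and py::cast the value to the matching Python type.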
-        py::object res = py::none(); 
+        py::object res = py::none();
         std::string paramType = self.getParameterType(key);
         if(paramType == typeid(int).name())
             res = py::cast(self.getParameter<int>(key));
@@ -62,6 +62,6 @@ void init_GenericOperator(py::module& m) {
     });
 
     m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nbDataIn"), py::arg("nbIn"), py::arg("nbOut"),
-          py::arg("name") = nullptr);
+          py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 27a292f0baf2673f3d963f3c3b9a69892c4c6521..c062d93f5c40fe46336fe34f6d1664f24da07732 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -21,6 +21,6 @@ namespace Aidge {
 void init_LeakyReLU(py::module& m) {
     py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, PyAbstractParametrizable>(m, "LeakyReLU_Op", py::multiple_inheritance());
 
-    m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = nullptr);
+    m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index c81845ca5e5ba3674356d16db660f4e3550e9004..b6ae27289fabe1fe4dbeea60704a61373bc850cf 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -23,7 +23,7 @@ namespace Aidge {
 void declare_Matmul(py::module &m) {
   py::class_<Matmul_Op, std::shared_ptr<Matmul_Op>, Operator, PyAbstractParametrizable>(m, "Matmul_Op", py::multiple_inheritance());
 
-  m.def("Matmul", &Matmul, py::arg("out_channels"), py::arg("name") = nullptr);
+  m.def("Matmul", &Matmul, py::arg("out_channels"), py::arg("name") = "");
 }
 
 void init_Matmul(py::module &m) {
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9bd951c446e080ff27b099527ac9bbc350646140
--- /dev/null
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -0,0 +1,89 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+#ifdef PYBIND
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include <string>
+#include <vector>
+#include <array>
+
+#include "aidge/utils/Parameter.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/data/Tensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, PyAbstractParametrizable>(
+    m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
+    py::multiple_inheritance())
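+  // Bind the constructor taking fixed-size std::array kernel/stride/padding dimensions.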
+  .def(py::init<const std::array<DimSize_t, DIM> &,
+                const std::array<DimSize_t, DIM> &,
+                const std::array<DimSize_t, (DIM<<1)> &>(),
+        py::arg("kernel_dims"),
+        py::arg("stride_dims"),
+        py::arg("padding_dims"));
+  
+  m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
+                                                                  const std::string& name,
+                                                                  const std::vector<DimSize_t> &stride_dims,
+                                                                  const std::vector<DimSize_t> &padding_dims) {
+        // Lambda wrapper because PyBind fails to convert const arrays:
+        // we take vectors and convert them here to const DimSize_t [DIM] arrays.
+        if (kernel_dims.size() != DIM) {
+            throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
+        }
+        if (stride_dims.size() != DIM) {
+            throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
+        }
+        if (padding_dims.size() != (DIM<<1)) {
+            throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) +"]");
+        }
+        DimSize_t tmp_kernel_dims_array[DIM];
+        for (size_t i = 0; i < DIM; ++i) {
+            tmp_kernel_dims_array[i] = kernel_dims[i];
+        }
+        DimSize_t tmp_stride_dims_array[DIM];
+        for (size_t i = 0; i < DIM; ++i) {
+            tmp_stride_dims_array[i] = stride_dims[i];
+        }
+        DimSize_t tmp_padding_dims_array[DIM<<1];
+        for (size_t i = 0; i < (DIM<<1); ++i) {
+            tmp_padding_dims_array[i] = padding_dims[i];
+        }
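+        // Re-expose the temporary buffers as const fixed-size array references for to_array().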
+        const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array;
+        const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array;
+        const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
+        return MaxPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
+    }, py::arg("kernel_dims"),
+       py::arg("name") = "",
+       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
+       py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
+  
+}
+
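+// A minimal usage sketch from Python, assuming the module is imported as
+// aidge_core (module name taken from the project, not from this file):
+//   import aidge_core
+//   pool = aidge_core.MaxPooling2D(kernel_dims=[2, 2], stride_dims=[2, 2], name="pool1")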
+
+void init_MaxPooling(py::module &m) {
+  declare_MaxPoolingOp<1>(m);
+  declare_MaxPoolingOp<2>(m);
+  declare_MaxPoolingOp<3>(m);
+ 
+  // FIXME:
+  // m.def("MaxPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
+  // (&)[1])>(&MaxPooling));
+}
+} // namespace Aidge
+#endif
\ No newline at end of file
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 5757891a30c5b40dcfa5ff99b1f06e00376f475a..ea9880800059e8993996e67138f89419c165fc4f 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -25,7 +25,7 @@ namespace Aidge {
 template <DimIdx_t DIM>
 void declare_Producer(py::module &m) {
     // m.def(("Producer_" + std::to_string(DIM)+"D").c_str(), py::overload_cast<shared_ptr<Node>&>(&Producer<DIM>), py::arg("dims"), py::arg("name"));
-    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const char*)>(&Producer), py::arg("dims"), py::arg("name") = nullptr);
+    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const std::string&)>(&Producer), py::arg("dims"), py::arg("name") = "");
     
 }
 
@@ -36,7 +36,7 @@ void init_Producer(py::module &m) {
         "ProducerOp", 
         py::multiple_inheritance())
     .def("dims", &Producer_Op::dims);
-    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const char*)>(&Producer), py::arg("tensor"), py::arg("name") = nullptr);
+    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&)>(&Producer), py::arg("tensor"), py::arg("name") = "");
     
     declare_Producer<1>(m);
     declare_Producer<2>(m);
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index e0d34d5a91a4ed1fcb8507198eb222b2d02e4e26..820589d76507b39ca65ac2397614aabd1221fe3e 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -20,6 +20,6 @@ namespace Aidge {
 void init_ReLU(py::module& m) {
     py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, Operator>(m, "ReLU_Op", py::multiple_inheritance());
 
-    m.def("ReLU", &ReLU, py::arg("name") = nullptr);
+    m.def("ReLU", &ReLU, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 13ba96ade4f5c5d132274e457efa5b4edcd3dc78..72ac1107181c1d7e2f578e31a965636dbb5c111b 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -21,6 +21,6 @@ namespace Aidge {
 void init_Softmax(py::module& m) {
     py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Operator>(m, "Softmax_Op", py::multiple_inheritance());
 
-    m.def("Softmax", &Softmax, py::arg("name") = nullptr);
+    m.def("Softmax", &Softmax, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index b861f881c684a2fbe800ab672299871cfc89d7ac..78418d51a5c410cb56bb8421fd7f3dc6ec6d32db 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -29,6 +29,7 @@ void init_FC(py::module&);
 void init_GenericOperator(py::module&);
 void init_LeakyReLU(py::module&);
 void init_Matmul(py::module&);
+void init_MaxPooling(py::module&);
 void init_Producer(py::module&);
 void init_ReLU(py::module&);
 void init_Softmax(py::module&);
@@ -75,6 +76,7 @@ void init_Aidge(py::module& m){
     init_GenericOperator(m);
     init_LeakyReLU(m);
     init_Matmul(m);
+    init_MaxPooling(m);
     init_ReLU(m);
     init_Softmax(m);
 
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index 2490d5c55a497223b13bceee6772c2dd44e733ef..85479d41f51e74dee4079e78a37e7f3a520639e2 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -10,6 +10,7 @@
  ********************************************************************************/
 
 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
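+// Needed so that STL containers, such as the vector returned by getStaticScheduling, convert to Python lists.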
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/graph/GraphView.hpp"
 
@@ -20,6 +21,8 @@ void init_Scheduler(py::module& m){
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
     .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false)
     .def("save_scheduling_diagram", &SequentialScheduler::saveSchedulingDiagram, py::arg("file_name"))
+    .def("generate_scheduling", &SequentialScheduler::generateScheduling, py::arg("verbose")=false)
+    .def("get_static_scheduling", &SequentialScheduler::getStaticScheduling)
     ;
 }
 }
diff --git a/setup.ps1 b/setup.ps1
new file mode 100644
index 0000000000000000000000000000000000000000..61324cf4a7d64094f5ead498adf64719c3290f06
--- /dev/null
+++ b/setup.ps1
@@ -0,0 +1,52 @@
+# Helper setup tool to automatically build aidge_core on Windows.
+
+# Requirements
+################################################################################
+# You must already have either VS Build Tools or VS Community on your
+# system, with the C++ build tools installed.
+# If not, download Visual Studio Community here:
+# https://visualstudio.microsoft.com/fr/vs/community/
+# Make sure to install the "Desktop Development with C++" workload.
+# Run this script in a PowerShell console with Administrator rights to install
+# the dependencies automatically, or run only the second part if all the
+# dependencies are already satisfied.
+
+# Enable or disable automatic installation of requirements
+# Run .\setup.ps1 -install_reqs:$false to disable it
+param ([bool]$install_reqs=$true)
+
+# Default install path is .\install_cpp
+if (-not $env:AIDGE_INSTALL_PATH)
+{
+    $env:AIDGE_INSTALL_PATH = $(Join-Path $pwd install_cpp)
+}
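+# For example, to install somewhere else (hypothetical path):
+#   $env:AIDGE_INSTALL_PATH = "C:\aidge\install_cpp"; .\setup.ps1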
+
+# 1. Setup environment
+################################################################################
+if ($install_reqs)
+{
+    # Install Chocolatey
+    Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+    # Install dependencies
+    choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    choco install git -Y
+    choco install python -Y
+    # Update PATH
+    $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+}
+
+# 2. Compile & install aidge_core
+################################################################################
+mkdir -Force build_cpp
+mkdir -Force $env:AIDGE_INSTALL_PATH
+Set-Location build_cpp
+cmake -DCMAKE_INSTALL_PREFIX:PATH=$env:AIDGE_INSTALL_PATH -DCMAKE_BUILD_TYPE=Debug ..
+if(!$?) { $lastError = $LASTEXITCODE; Set-Location $PSScriptRoot; Exit $lastError }
+cmake --build . -j2
+if(!$?) { $lastError = $LASTEXITCODE; Set-Location $PSScriptRoot; Exit $lastError }
+cmake --install . --config Debug
+if(!$?) { $lastError = $LASTEXITCODE; Set-Location $PSScriptRoot; Exit $lastError }
+# Optional: run the unit tests
+ctest --output-on-failure
+if(!$?) { $lastError = $LASTEXITCODE; Set-Location $PSScriptRoot; Exit $lastError }
+Set-Location $PSScriptRoot
diff --git a/src/graph/Connector.cpp b/src/graph/Connector.cpp
index f189b92b24cc5529ae8fb6d8c9faac97e296a92c..cd2ceff8b58076a5054269e4676120b94c8b5beb 100644
--- a/src/graph/Connector.cpp
+++ b/src/graph/Connector.cpp
@@ -39,7 +39,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::generateGraph(std::vector<Connector> ct
             graph->add(nodesToAdd.back());  // only add, connection already done
                                             // between nodes
             std::vector<std::shared_ptr<Node>> parents = nodesToAdd.back()->getParents();
-            std::set<std::shared_ptr<Node>> alreadyAdded = graph->getNodes();
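+            // Reference the node set instead of copying it for every visited node.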
+            const std::set<std::shared_ptr<Node>>& alreadyAdded = graph->getNodes();
             for (std::shared_ptr<Node> parent : parents) {
                 if (alreadyAdded.find(parent) == alreadyAdded.end()) {
                     buffer.push_back(parent);
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 53d9f844a6a5bda4961659b8ff7f8b1fcf53b4e7..7cb4e1dcf33b71bec87ea883aceb8c8a3c49a5ba 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -28,13 +28,15 @@ Aidge::Connector Aidge::GraphView::operator()(
   assert((inputNodes().size() == 1U) && "Too many input Nodes for the GraphView, undefined behaviour");
   std::shared_ptr<Node> inNode = *inputNodes().begin();
   assert((ctors.size() == static_cast<std::size_t>(inNode->nbDataInputs())) && "Wrong number of arguments.\n");
-  for (__attribute__((unused)) std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inNode->inputs()) {
+  for (std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inNode->inputs()) {
     assert((gk_IODefaultIndex == input.second) && "At least one input connection is not free.\n");
+    (void)input; // avoid unused warning
   }
 
-  for (__attribute__((unused)) const Connector &ctor : ctors) {
+  for (const Connector &ctor : ctors) {
     assert((ctor.node() != nullptr) &&
            "Input Connector must be associated with a node");
+    (void)ctor; // avoid unused warning when asserts are compiled out
   }
   IOIndex_t inID = 0;
   for (const Connector &ctor : ctors) {
@@ -324,7 +326,7 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
   // add learnable parameters to the graph
   if (includeLearnableParam) {
     for (IOIndex_t i = node->nbDataInputs(); i < node->nbInputs(); ++i) {
-      std::shared_ptr<Node> parentNode = node->getParents(static_cast<IOIndex_t>(i));
+      std::shared_ptr<Node> parentNode = node->getParent(static_cast<IOIndex_t>(i));
       if (parentNode) {
           parentNode->addView(shared_from_this());
           mNodes.insert(parentNode);
@@ -462,13 +464,13 @@ Aidge::GraphView::getChildren(const std::shared_ptr<Node> otherNode) const {
 
 
 std::shared_ptr<Aidge::Node>
-Aidge::GraphView::getNode(const char *nodeName) const {
+Aidge::GraphView::getNode(const std::string& nodeName) const {
   std::map<std::string, std::shared_ptr<Node>>::const_iterator it =
-      mNodeRegistry.find(std::string(nodeName));
+      mNodeRegistry.find(nodeName);
   if (it != mNodeRegistry.end()) {
     return it->second;
   } else {
-    printf("No Node named %s in the current GraphView.\n", nodeName);
+    printf("No Node named %s in the current GraphView.\n", nodeName.c_str());
     exit(-1);
   }
 }
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 286ed7136a369e63f567b35135f89afcc266e0e1..abf572831d8f0b5c2c5eb836ea46e05b8114da55 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -17,8 +17,8 @@
 #include <vector>
 #include "aidge/utils/Types.h"
 
-Aidge::Node::Node(std::shared_ptr<Operator> op, const char *name)
-    : mName((name == nullptr) ? std::string() : std::string(name)),
+Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
+    : mName(name),
       mOperator(op),
       mParents(std::vector<std::shared_ptr<Node>>(static_cast<std::size_t>(op->nbInputs()), nullptr)),
       mChildren(std::vector<std::vector<std::weak_ptr<Node>>>(static_cast<std::size_t>(op->nbOutputs()),
@@ -35,8 +35,9 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, const char *name)
 
 Aidge::Connector Aidge::Node::operator()(const std::vector<Connector> &ctors) {
     assert((ctors.size() == nbDataInputs()) && "Wrong number of arguments.\n");
-    for (__attribute__((unused)) std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inputs()) {
+    for (std::pair<std::shared_ptr<Node>, IOIndex_t> &input : inputs()) {
         assert((gk_IODefaultIndex == input.second) && "At least one input connection is not free.\n");
+        (void)input; // avoid unused warning
     }
     IOIndex_t i = 0;
     for (const Connector &ctor : ctors) {
@@ -225,7 +226,7 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t
 }
 
 void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOIndex_t inId) {
-    if (getParents(inId) != nullptr) {
+    if (getParent(inId) != nullptr) {
         printf("Warning, you're replacing a Parent.\n");
     }
     assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Input index out of bound.");
diff --git a/src/graph/OpArgs.cpp b/src/graph/OpArgs.cpp
index f5f33fb049dec440f3bae412348c83e3427f06ce..124878fc45fe632d4a584e76a0eae6e7acfd53b9 100644
--- a/src/graph/OpArgs.cpp
+++ b/src/graph/OpArgs.cpp
@@ -14,13 +14,13 @@
 #include "aidge/graph/OpArgs.hpp"
 
 
-std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::initializer_list<OpArgs> inputs) {
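+// std::vector<OpArgs> (rather than std::initializer_list) lets callers build the
+// argument list at runtime; presumably it also eases Python binding via pybind11/stl.h.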
+std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs) {
     std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
     for (const OpArgs& elt : inputs) {
         if(elt.node() != nullptr) {
             // >= to allow incomplete graphViews
             assert(static_cast<std::size_t>(elt.node()->getNbFreeDataInputs()) >= gv->outputNodes().size());
-            /* 
+            /*
             *  /!\ mn.view()->outputNodes() is a set, order of Nodes cannot be guaranted.
             *  Prefer a functional description for detailed inputs
             */
@@ -44,7 +44,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::initializer_list<OpArgs
 }
 
 
-std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::initializer_list<OpArgs> inputs) {
+std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::vector<OpArgs> inputs) {
     std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
     for(const OpArgs& elt : inputs) {
         if (elt.node()!=nullptr)
@@ -56,7 +56,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::initializer_list<OpArgs>
 }
 
 
-std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::initializer_list<OpArgs> inputs) {
+std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::vector<OpArgs> inputs) {
     std::shared_ptr<GraphView> gv = Sequential(inputs);
     assert(gv->outputNodes().size() == 1U && "Zero or more than one output Node for the GraphView, don't know which one to choose from for the residual connection");
     std::shared_ptr<Node> lastNode = *gv->outputNodes().begin();
@@ -70,4 +70,4 @@ std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::initializer_list<OpArgs>
     assert(lastNode->getNbFreeDataInputs()>=1);
     gv->addChild(lastNode, firstNode, 0U, gk_IODefaultIndex);
     return gv;
-}
\ No newline at end of file
+}
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index d776a13441d8f16446cc195c8fd893527b321ff9..09a17a428e1de91c0318f710e6f097573cf529a6 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -38,6 +38,9 @@ Aidge::NbElts_t Aidge::Operator::getNbConsumedData(Aidge::IOIndex_t inputIdx) co
 Aidge::NbElts_t Aidge::Operator::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
     return mImpl->getNbProducedData(outputIdx);
 }
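+// Delegates the consumer/producer bookkeeping update to the backend implementation.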
+void Aidge::Operator::updateConsummerProducer(){
+    mImpl->updateConsummerProducer();
+}
 
 void Aidge::Operator::runHooks() const {
     for (auto& hook : mHooks) {
diff --git a/src/recipies/FuseMulAdd.cpp b/src/recipies/FuseMulAdd.cpp
index dc565bf0acc7747d79ec12df973a82d86fc79503..561d25776a28f1aad8f8c943711887ec6661a10c 100644
--- a/src/recipies/FuseMulAdd.cpp
+++ b/src/recipies/FuseMulAdd.cpp
@@ -59,12 +59,12 @@ void Aidge::fuseMulAdd(std::set<std::shared_ptr<Node>> nodes){
 
     // Step 2 : Branch existing producers & create the others
     // link weights & bias
-    if (matmul->getParents(1)==nullptr) {
-        matmul->getParents(0)->addChild(fc, 0, 1);
+    if (matmul->getParent(1)==nullptr) {
+        matmul->getParent(0)->addChild(fc, 0, 1);
     } else {
-        if (matmul->getParents(0)!=nullptr)
-            matmul->getParents(0)->addChild(fc, 0, 0);
-        matmul->getParents(1)->addChild(fc, 0, 1);
+        if (matmul->getParent(0)!=nullptr)
+            matmul->getParent(0)->addChild(fc, 0, 0);
+        matmul->getParent(1)->addChild(fc, 0, 1);
     }
     (producer_add_bias.first)->addChild(fc,0,2);
 
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index fce46397ffd286a2ddbe254752b241578415e3d8..dc0768d2b6f7a1dd46fc0a8523b950011f7dcf5d 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -20,7 +20,7 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
-void drawProgressBar(double progress, int barWidth, const char* additionalInfo = nullptr) {
+void drawProgressBar(double progress, int barWidth, const std::string& additionalInfo = "") {
     putchar('[');
     int pos = static_cast<int>(barWidth * progress);
     for (int i = 0; i < barWidth; ++i) {
@@ -29,30 +29,23 @@ void drawProgressBar(double progress, int barWidth, const char* additionalInfo =
         else
             putchar(' ');
     }
-    printf("] %d%% | %s\r", static_cast<int>(progress * 100), (additionalInfo ? additionalInfo : ""));
+    printf("] %d%% | %s\r", static_cast<int>(progress * 100), additionalInfo.c_str());
     fflush(stdout);
 }
 
-// TODO: handle multiple inputs/outputs
-void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
-    if (frowardDims) {mGraphView->forwardDims(); }
-
-    mScheduling.clear();
-
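+// Builds the static schedule: walks the graph from its producers and appends each
+// runnable consumer, in execution order, to mStaticSchedule.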
+void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
     // setup initial producers list
-    // add each Producer Node.
-    std::set<std::shared_ptr<Node>> computationOver;
-    std::size_t computationNumber = 0;
+    mComputationNumber = 0;
     std::set<std::shared_ptr<Node>> producers;
     for (const std::shared_ptr<Node>& nodePtr : mGraphView->getNodes()) {
         if (nodePtr->type() == "Producer") {
             producers.insert(nodePtr);
         } else {
-            ++computationNumber;
+            ++mComputationNumber;
         }
     }
     // add Data Input
-    // FIXME : shoudl be changed when the real system for providing
+    // FIXME : should be changed when the real system for providing
     // data is implemented
     for (const std::shared_ptr<Node>& nodePtr : mGraphView->inputNodes()) {
         for (const auto& parentPtr : nodePtr->getParents()) {
@@ -112,22 +105,10 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
             }
         }
 
-        // run sequencially every runnable consumers once
-        // TODO: handle memory allocation in scheduler
-        // TODO: optimize memory usage
+        // Push each runnable consumer into the list of nodes to run and update the consumer-producer state
         for (const auto& runnable : runnableConsumers) {
-            if (verbose)
-                printf("run: %s\n",
-                       (runnable->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))).c_str());
-            else
-                drawProgressBar(static_cast<float>(computationOver.size()) / static_cast<float>(computationNumber), 50,
-                                (std::string("running ") + runnable->type() + "_" +
-                                 std::to_string(reinterpret_cast<uintptr_t>(runnable.get())))
-                                        .c_str());
-            const auto tStart = std::chrono::high_resolution_clock::now();
-            runnable->forward();
-            const auto tEnd = std::chrono::high_resolution_clock::now();
-            mScheduling.push_back(SchedulingElement(runnable, tStart, tEnd));
+            runnable->getOperator()->updateConsummerProducer();
+            mStaticSchedule.push_back(runnable);
         }
 
         // update producers and consumers list
@@ -165,18 +146,6 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
                 }
             }
 
-            bool computationOverForConsumer = true;
-            for (IOIndex_t parentIDi = 0; parentIDi < consumer->nbInputs(); ++parentIDi) {
-                if (consumer->getOperator()->getNbConsumedData(parentIDi) <
-                    consumer->getOperator()->getNbRequiredData(parentIDi)) {
-                    computationOverForConsumer = false;
-                    break;
-                }
-            }
-            if (computationOverForConsumer) {
-                computationOver.insert(consumer);
-            }
-
             for (IOIndex_t outId = 0; outId < consumer->nbOutputs(); ++outId) {
                 if (consumer->getOperator()->getNbProducedData(outId) > 0) {
                     if (verbose) printf("  also producer\n");
@@ -198,8 +167,52 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
 
         if (verbose) printf("*************\n");
     } while (!consumers.empty());
+
+}
+
+// TODO: handle multiple inputs/outputs
+void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
+    if (forwardDims) {mGraphView->forwardDims(); }
+
+    // add each Producer Node.
+    std::set<std::shared_ptr<Node>> computationOver;
+
+    mScheduling.clear();
+
+    this->generateScheduling();
+
+    // Run every node of the static schedule sequentially, once each
+    // TODO: handle memory allocation in scheduler
+    // TODO: optimize memory usage
+    for (const auto& runnable : mStaticSchedule) {
+        bool computationOverForConsumer = true;
+        for (IOIndex_t parentIDi = 0; parentIDi < runnable->nbInputs(); ++parentIDi) {
+            if (runnable->getOperator()->getNbConsumedData(parentIDi) <
+                runnable->getOperator()->getNbRequiredData(parentIDi)) {
+                computationOverForConsumer = false;
+                break;
+            }
+        }
+        if (computationOverForConsumer) {
+            computationOver.insert(runnable);
+        }
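+        // computationOver tracks fully-consumed nodes; it only drives the progress bar below.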
+
+        if (verbose)
+            printf("run: %s\n",
+                    (runnable->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))).c_str());
+        else
+            drawProgressBar(static_cast<float>(computationOver.size()) / static_cast<float>(mComputationNumber), 50,
+                            (std::string("running ") + runnable->type() + "_" +
+                                std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))));
+        const auto tStart = std::chrono::high_resolution_clock::now();
+        runnable->forward();
+        const auto tEnd = std::chrono::high_resolution_clock::now();
+        mScheduling.push_back(SchedulingElement(runnable, tStart, tEnd));
+    }
     if (!verbose) drawProgressBar(1.0, 50, "                                   ");
     printf("\n");
+
 }
 
 void Aidge::SequentialScheduler::saveSchedulingDiagram(const std::string& fileName) const {
@@ -232,4 +245,4 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::SequentialScheduler::getConsumers(
     }
 
     return consumers;
-}
\ No newline at end of file
+}