diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml index da0d23c9de978ebcdbb370a6f4a92262829e05b9..73b85c8a409e675c849b9ca66557c63b5acf6359 100644 --- a/.gitlab/ci/build.gitlab-ci.yml +++ b/.gitlab/ci/build.gitlab-ci.yml @@ -12,6 +12,7 @@ build:ubuntu_cpp: - make -j4 all install artifacts: + expire_in: 1 week paths: - build_cpp/ - install_cpp/ @@ -29,6 +30,7 @@ build:ubuntu_python: - export AIDGE_INSTALL=`pwd`/install - python3 -m pip install . artifacts: + expire_in: 1 week paths: - venv/ @@ -57,6 +59,7 @@ build:windows_cpp: - cmake --install . --config Debug artifacts: + expire_in: 1 week paths: - build_cpp/ - install_cpp/ diff --git a/.gitlab/ci/coverage.gitlab-ci.yml b/.gitlab/ci/coverage.gitlab-ci.yml index 027f3078180bb32b36ca4666f171dda90ef7f7be..3c7b7654190e0768adc6a904f1cb548f020b0c92 100644 --- a/.gitlab/ci/coverage.gitlab-ci.yml +++ b/.gitlab/ci/coverage.gitlab-ci.yml @@ -24,8 +24,10 @@ coverage:ubuntu_python: script: - source venv/bin/activate - python3 -m pip install numpy coverage - - cd aidge_core - - python3 -m coverage run --source=. -m unittest discover -s unit_tests/ -v -b + - cd ${CI_PROJECT_NAME} + # Retrieve the installation path of the module, since it is installed with pip. + - export MODULE_LOCATION=`python -c "import ${CI_PROJECT_NAME} as _; print(_.__path__[0])"` + - python3 -m coverage run --source=$MODULE_LOCATION -m unittest discover -s unit_tests/ -v -b - python3 -m coverage report - python3 -m coverage xml coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/' @@ -33,4 +35,4 @@ coverage:ubuntu_python: reports: coverage_report: coverage_format: cobertura - path: aidge_core/coverage.xml + path: ${CI_PROJECT_NAME}/coverage.xml diff --git a/.gitlab/ci/static_analysis.gitlab-ci.yml b/.gitlab/ci/static_analysis.gitlab-ci.yml index f7c09a33a65801fb25b1f20f76eac5a7a7952917..3955b87d4efdd9b3610b661779ab9709320754f2 100644 --- a/.gitlab/ci/static_analysis.gitlab-ci.yml +++ b/.gitlab/ci/static_analysis.gitlab-ci.yml @@ -26,8 +26,8 @@ static_analysis:python: script: - pip install pylint - pip install pylint-gitlab - - pylint --rcfile=.pylintrc --exit-zero --output-format=pylint_gitlab.GitlabCodeClimateReporter aidge_core/ > codeclimate.json - - pylint --rcfile=.pylintrc --exit-zero --output-format=pylint_gitlab.GitlabPagesHtmlReporter aidge_core/ > pylint.html + - pylint --rcfile=.pylintrc --exit-zero --output-format=pylint_gitlab.GitlabCodeClimateReporter ${CI_PROJECT_NAME}/ > codeclimate.json + - pylint --rcfile=.pylintrc --exit-zero --output-format=pylint_gitlab.GitlabPagesHtmlReporter ${CI_PROJECT_NAME}/ > pylint.html - mkdir -p public/python/$CI_COMMIT_REF_NAME - mv pylint.html public/python/$CI_COMMIT_REF_NAME/ artifacts: diff --git a/.gitlab/ci/test.gitlab-ci.yml b/.gitlab/ci/test.gitlab-ci.yml index 1e67ce273abc7d6b02f9e3148264ff3f9ea1cf07..81e6ca9ac5b868287aa0ef27040c0ead785d3639 100644 --- a/.gitlab/ci/test.gitlab-ci.yml +++ b/.gitlab/ci/test.gitlab-ci.yml @@ -17,14 +17,14 @@ test:ubuntu_python: - docker script: - source venv/bin/activate - - cd aidge_core + - cd ${CI_PROJECT_NAME} - python3 -m pip install unittest-xml-reporting - python3 -m pip list # Run on discovery all tests located in core/unit_tests/python - python3 -m xmlrunner discover -s unit_tests/ -v -b --output-file xmlrunner-results.xml artifacts: reports: - junit: aidge_core/xmlrunner-results.xml + junit: ${CI_PROJECT_NAME}/xmlrunner-results.xml test:windows_cpp: stage: test @@ -37,6 +37,7 @@ test:windows_cpp: - Set-ExecutionPolicy Bypass -Scope Process 
-Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) # Install dependencies - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y + - choco install python -Y # Update PATH - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User") script: diff --git a/CMakeLists.txt b/CMakeLists.txt index 67ad9304bc3e682a9436fb52306b3ca8120c1c4b..b764086c8e974dc53aadd345cdd287918d599afb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -52,9 +52,9 @@ target_include_directories(${module_name} ) # PYTHON BINDING -generate_python_binding(${project} ${module_name}) - if (PYBIND) + generate_python_binding(${project} ${module_name}) + # Handles Python + pybind11 headers dependencies target_link_libraries(${module_name} PUBLIC @@ -66,22 +66,12 @@ endif() target_compile_features(${module_name} PRIVATE cxx_std_14) - -if(WERROR) - target_compile_options(${module_name} PRIVATE +target_compile_options(${module_name} PRIVATE $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>: - -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Werror>) - target_compile_options(${module_name} PRIVATE + -Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow $<$<BOOL:${WERROR}>:-Werror>>) +target_compile_options(${module_name} PRIVATE $<$<CXX_COMPILER_ID:MSVC>: /W4>) -else() - target_compile_options(${module_name} PRIVATE - $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>: - -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Wpedantic>) - target_compile_options(${module_name} PRIVATE - $<$<CXX_COMPILER_ID:MSVC>: - /W4>) -endif() if(CMAKE_COMPILER_IS_GNUCXX AND COVERAGE) append_coverage_compiler_flags() diff --git a/cmake/PybindModuleCreation.cmake b/cmake/PybindModuleCreation.cmake index 18f4abc38e2537c3f4d949f08772a57b90758cb0..8030c1a8639e4b7ae0c5fb865e928a4260c6ae7d 100644 --- a/cmake/PybindModuleCreation.cmake +++ b/cmake/PybindModuleCreation.cmake @@ -1,23 +1,21 @@ -function(generate_python_binding name target_to_bind) - if (PYBIND) - add_definitions(-DPYBIND) - Include(FetchContent) +function(generate_python_binding name target_to_bind) + add_definitions(-DPYBIND) + Include(FetchContent) - FetchContent_Declare( - PyBind11 - GIT_REPOSITORY https://github.com/pybind/pybind11.git - GIT_TAG v2.10.4 # or a later release - ) + FetchContent_Declare( + PyBind11 + GIT_REPOSITORY https://github.com/pybind/pybind11.git + GIT_TAG v2.10.4 # or a later release + ) - # Use the New FindPython mode, recommanded. Requires CMake 3.15+ - find_package(Python COMPONENTS Interpreter Development) - FetchContent_MakeAvailable(PyBind11) + # Use the new FindPython mode, recommended.
Requires CMake 3.15+ + find_package(Python COMPONENTS Interpreter Development) + FetchContent_MakeAvailable(PyBind11) - message(STATUS "Creating binding for module ${name}") - file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp") + message(STATUS "Creating binding for module ${name}") + file(GLOB_RECURSE pybind_src_files "python_binding/*.cpp") - pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO EXTRA recquired for pip install - target_include_directories(${name} PUBLIC "python_binding") - target_link_libraries(${name} PUBLIC ${target_to_bind}) - endif() + pybind11_add_module(${name} MODULE ${pybind_src_files} "NO_EXTRAS") # NO_EXTRAS required for pip install + target_include_directories(${name} PUBLIC "python_binding") + target_link_libraries(${name} PUBLIC ${target_to_bind}) endfunction() diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp index ff6601c487ea97294019a12ba899d251b08077e7..13c360796fb4912ffb6b5ad17d68c7b56b38b943 100644 --- a/include/aidge/aidge.hpp +++ b/include/aidge/aidge.hpp @@ -34,7 +34,8 @@ #include "aidge/operator/FC.hpp" #include "aidge/operator/GenericOperator.hpp" #include "aidge/operator/Matmul.hpp" -#include "aidge/operator/MetaOperator.hpp" +#include "aidge/operator/MaxPooling.hpp" +//#include "aidge/operator/MetaOperator.hpp" #include "aidge/operator/Operator.hpp" #include "aidge/operator/Producer.hpp" #include "aidge/operator/ReLU.hpp" diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp index 5aa2829e16f612b0867ab69feccb829ba2095e1b..d10270b62bb75412a6cbd9203b9b7a3fe220e5aa 100644 --- a/include/aidge/backend/OperatorImpl.hpp +++ b/include/aidge/backend/OperatorImpl.hpp @@ -20,7 +20,7 @@ namespace Aidge { class OperatorImpl { public: virtual void forward(){}; - virtual void backward() {} + virtual void backward(){}; /** * @brief Minimum amount of data from a specific input required by the @@ -46,13 +46,19 @@ public: virtual NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const = 0; /** - * @brief TOtal amount of produced data ready to be used on a specific output. + * @brief Total amount of produced data ready to be used on a specific output. * * @param outputIdx Index of the output analysed. * @return DimSize_t */ virtual NbElts_t getNbProducedData(const IOIndex_t outputIdx) const = 0; + /** + * @brief Update the Consumer-Producer system by simulating the consumption and production of I/O + * + */ + virtual void updateConsummerProducer() = 0; + virtual ~OperatorImpl() = default; }; } // namespace Aidge diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp index 718eddeaf6a5d08c9dab4898f5a57c0192dcb80b..f11136adaaa3d23fa9d3dc5749dd5d6771cbc42c 100644 --- a/include/aidge/graph/GraphView.hpp +++ b/include/aidge/graph/GraphView.hpp @@ -208,7 +208,7 @@ public: * @brief Get the Nodes pointed to by the GraphView object. * @return std::set<NodePtr> */ - inline std::set<NodePtr> getNodes() const { return mNodes; } + inline const std::set<NodePtr>& getNodes() const { return mNodes; } /** * @brief Get the operator with the corresponding name if it is in the * @return NodePtr returns a new empty node if the one asked for * was not found. */ - NodePtr getNode(const char *nodeName) const; + NodePtr getNode(const std::string& nodeName) const; /** * @brief Remove a Node from the current GraphView scope without affecting its connections.
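The GraphView API above switches node lookup to `std::string` and returns the node set by const reference. A minimal usage sketch under the new signatures — the two-layer graph and its node names are illustrative, not part of this patch:

```cpp
#include <cstdio>
#include <memory>

#include "aidge/aidge.hpp"

int main() {
    // Hypothetical graph; the factories now take std::string names (see below).
    std::shared_ptr<Aidge::GraphView> g =
        Aidge::Sequential({Aidge::Conv(3, 32, {3, 3}, "conv1"), Aidge::ReLU("relu1")});

    // getNode() accepts a std::string directly; no more c_str() at call sites.
    std::shared_ptr<Aidge::Node> conv = g->getNode("conv1");
    std::printf("found: %s\n", conv->name().c_str());

    // getNodes() returns a const reference, so iterating no longer copies the set.
    for (const std::shared_ptr<Aidge::Node>& n : g->getNodes()) {
        std::printf("node type: %s\n", n->type().c_str());
    }
    return 0;
}
```

Note that returning `const std::set<NodePtr>&` means any caller that relied on mutating the returned copy must now take an explicit copy.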
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp index f056505e6e7839266213ac355cc0e1b93ab98f0d..11def52dbab30159e9e882fb19d16f1549aa3887 100644 --- a/include/aidge/graph/Node.hpp +++ b/include/aidge/graph/Node.hpp @@ -62,7 +62,7 @@ public: * @param op Operator giving the Node its number of connections. * @param name (optional) name for the Node. */ - Node(std::shared_ptr<Operator> op, const char *name = nullptr); + Node(std::shared_ptr<Operator> op, const std::string& name = ""); virtual ~Node() = default; diff --git a/include/aidge/graph/OpArgs.hpp b/include/aidge/graph/OpArgs.hpp index 560c3a02c641c29526752dbf352905d0ded32a7e..9d1ba6fd1e1df594634bfd93a24663ff178b7ee6 100644 --- a/include/aidge/graph/OpArgs.hpp +++ b/include/aidge/graph/OpArgs.hpp @@ -55,7 +55,7 @@ public: * @param inputs List of Node and GraphView to link sequentially. * @return std::shared_ptr<GraphView> Pointer to the generated view. */ -std::shared_ptr<GraphView> Sequential(std::initializer_list<OpArgs> inputs); +std::shared_ptr<GraphView> Sequential(std::vector<OpArgs> inputs); ///////////////////////////// // Parallel @@ -65,7 +65,7 @@ std::shared_ptr<GraphView> Sequential(std::initializer_list<OpArgs> inputs); * @param inputs List of Node and GraphView to link sequentially. * @return std::shared_ptr<GraphView> pointer to the generated view. */ -std::shared_ptr<GraphView> Parallel(std::initializer_list<OpArgs> inputs); +std::shared_ptr<GraphView> Parallel(std::vector<OpArgs> inputs); ///////////////////////////// // Residual @@ -79,8 +79,8 @@ std::shared_ptr<GraphView> Parallel(std::initializer_list<OpArgs> inputs); * @param inputs List of Node and GraphView to link sequentially. * @return std::shared_ptr<GraphView> pointer to the generated view. */ -std::shared_ptr<GraphView> Residual(std::initializer_list<OpArgs> inputs); +std::shared_ptr<GraphView> Residual(std::vector<OpArgs> inputs); } -#endif /* AIDGE_CORE_GRAPH_OPARGS_H_ */ \ No newline at end of file +#endif /* AIDGE_CORE_GRAPH_OPARGS_H_ */ diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp index c96b2c571f412124ccdfb83dde685e111448a222..ff3d1888c3bc70b61a3d4da42908d40de2d1d73e 100644 --- a/include/aidge/operator/Add.hpp +++ b/include/aidge/operator/Add.hpp @@ -141,7 +141,7 @@ public: }; template <std::size_t NUM> -inline std::shared_ptr<Node> Add(const char* name = nullptr) { +inline std::shared_ptr<Node> Add(const std::string& name = "") { return std::make_shared<Node>(std::make_shared<Add_Op<NUM>>(), name); } } diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp index 7bf8740877e635cc2e59418bee1c444c7f3884e8..bf76bd45893b43043b81cd6563c500be27c66b42 100644 --- a/include/aidge/operator/AvgPooling.hpp +++ b/include/aidge/operator/AvgPooling.hpp @@ -146,7 +146,7 @@ public: template <std::array<DimSize_t, 1>::size_type DIM> inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims, - const char *name = nullptr, + const std::string& name = "", const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) { // FIXME: properly handle default w&b initialization in every cases @@ -158,7 +158,7 @@ inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel template <DimSize_t DIM> inline std::shared_ptr<Node> AvgPooling( DimSize_t const (&kernel_dims)[DIM], - const char *name = nullptr, + const std::string& name = "", 
const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) { static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported"); diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp index 07af5fa8416cf726e209cd9e690af345b321fb0e..6861c1359737f3f344f0c7d9b2d12c9ff35b88ad 100644 --- a/include/aidge/operator/BatchNorm.hpp +++ b/include/aidge/operator/BatchNorm.hpp @@ -144,7 +144,7 @@ public: template <DimSize_t DIM> inline std::shared_ptr<Node> BatchNorm(const float epsilon = 1.0e-5F, const float momentum = 0.1F, - const char *name = nullptr) { + const std::string& name = "") { static_assert(DIM<=MaxDim,"Too many kernel dimensions required by BatchNorm, not supported"); auto batchNorm = std::make_shared<Node>(std::make_shared<BatchNorm_Op<static_cast<DimIdx_t>(DIM)>>(epsilon, momentum), name); addProducer(batchNorm, 1, std::array<DimSize_t,0>({}), "scale"); diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp index d6efba2cec6908ad58b9feea5e53807c7227cc88..1edc94b96763cc163646037a8bd069023511df67 100644 --- a/include/aidge/operator/Conv.hpp +++ b/include/aidge/operator/Conv.hpp @@ -166,7 +166,7 @@ template <std::array<DimSize_t, 1>::size_type DIM> inline std::shared_ptr<Node> Conv(DimSize_t in_channels, DimSize_t out_channels, const std::array<DimSize_t, DIM> &kernel_dims, - const char *name = nullptr, + const std::string& name = "", const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0), const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) { @@ -184,7 +184,7 @@ inline std::shared_ptr<Node> Conv( DimSize_t in_channels, DimSize_t out_channels, DimSize_t const (&kernel_dims)[DIM], - const char *name = nullptr, + const std::string& name = "", const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0), const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) { diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp index a3b7fbf3b21a5b3fd9e532e0cc19cebd46e5d022..95a2ff55b70dbed9299fb3dca98fb9b0e700d210 100644 --- a/include/aidge/operator/ConvDepthWise.hpp +++ b/include/aidge/operator/ConvDepthWise.hpp @@ -165,7 +165,7 @@ class ConvDepthWise_Op : public Operator, template <std::array<DimSize_t, 1>::size_type DIM> inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims, - const char *name = nullptr, + const std::string& name = "", const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0), const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) { @@ -180,7 +180,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &ker template <DimSize_t DIM> inline std::shared_ptr<Node> ConvDepthWise( DimSize_t const (&kernel_dims)[DIM], - const char *name = nullptr, + const std::string& name = "", const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0), const std::array<DimSize_t, DIM> 
&dilation_dims = create_array<DimSize_t,DIM>(1)) { diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp index 6e4c54a030c108c29c08a8f5dfdc24d084ccc91c..db92dc9c735416d250fa32e2f9010b21b8f808c0 100644 --- a/include/aidge/operator/FC.hpp +++ b/include/aidge/operator/FC.hpp @@ -139,7 +139,7 @@ public: inline IOIndex_t nbOutputs() const noexcept override final { return 1; } }; -inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const char* name = nullptr) { +inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "") { // FIXME: properly handle default w&b initialization in every cases auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name); addProducer(fc, 1, {out_channels, 1}, "w"); diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp index 94cdc6727de7078ca4fc3bb0940a01731feb92cc..12fb7e16741e9f7ad96d51b0b847b91265c3a7d2 100644 --- a/include/aidge/operator/GenericOperator.hpp +++ b/include/aidge/operator/GenericOperator.hpp @@ -163,7 +163,7 @@ class GenericOperator_Op * @return std::shared_ptr<Node> Node associated with the Generic Operator. */ inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut, - const char *name = nullptr) { + const std::string& name = "") { return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbDataIn, nbIn, nbOut), name); } } // namespace Aidge diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp index 64587d51de784082da455eb64aa5bbe175773b5d..1dff2550a42245351afab5b8bb1a708a8d0d8c0b 100644 --- a/include/aidge/operator/LeakyReLU.hpp +++ b/include/aidge/operator/LeakyReLU.hpp @@ -117,7 +117,7 @@ public: inline IOIndex_t nbOutputs() const noexcept override final { return 1; } }; -inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const char* name = nullptr) { +inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "") { // FIXME: properly handle default w&b initialization in every cases return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name); } diff --git a/include/aidge/operator/Matmul.hpp b/include/aidge/operator/Matmul.hpp index b44e8a9b9540e287ff35af1c9642c8202fd096d0..639b366912060b3e085510f312d94568e6b65f03 100644 --- a/include/aidge/operator/Matmul.hpp +++ b/include/aidge/operator/Matmul.hpp @@ -129,7 +129,7 @@ public: inline IOIndex_t nbOutputs() const noexcept override final { return 1; } }; -inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const char* name = nullptr) { +inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const std::string& name = "") { // FIXME: properly handle default w&b initialization in every cases auto matmul = std::make_shared<Node>(std::make_shared<Matmul_Op>(out_channels), name); addProducer(matmul, 1, {1, out_channels}, "w"); diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp new file mode 100644 index 0000000000000000000000000000000000000000..073243e801c6e1297129424b0c93b1a7c4f112f3 --- /dev/null +++ b/include/aidge/operator/MaxPooling.hpp @@ -0,0 +1,174 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available 
at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CORE_OPERATOR_MAXPOOLING_H_ +#define AIDGE_CORE_OPERATOR_MAXPOOLING_H_ + +#include <array> +#include <numeric> +#include <vector> +#include <cmath> + +#include "aidge/data/Tensor.hpp" +#include "aidge/graph/Node.hpp" +#include "aidge/operator/Operator.hpp" +#include "aidge/operator/Producer.hpp" +#include "aidge/utils/Parameter.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" + +namespace Aidge { +enum class MaxPoolingParam { StrideDims, KernelDims, PaddingDims }; + +template <DimIdx_t DIM> +class MaxPooling_Op : public Operator, +                public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>, +                public Parameterizable<MaxPoolingParam, +                                       std::array<DimSize_t, DIM>, +                                       std::array<DimSize_t, DIM>, +                                       std::array<DimSize_t, (DIM<<1) >> { +private: +    // FIXME: change accessibility +    std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>(); +    const std::shared_ptr<Tensor> mOutput = std::make_shared<Tensor>(); + +public: +    static constexpr const char *Type = "MaxPooling"; + +    MaxPooling_Op() = delete; + +    using Parameterizable_ = Parameterizable<MaxPoolingParam, +                                             std::array<DimSize_t, DIM>, +                                             std::array<DimSize_t, DIM>, +                                             std::array<DimSize_t, (DIM<<1)> >; +    template <MaxPoolingParam e> +    using param = typename Parameterizable_::template param<e>; + +    constexpr MaxPooling_Op(const std::array<DimSize_t, DIM> &kernel_dims, +                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), +                            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) +        : Operator(Type), +          Parameterizable_(param<MaxPoolingParam::StrideDims>(stride_dims), +                           param<MaxPoolingParam::KernelDims>(kernel_dims), +                           param<MaxPoolingParam::PaddingDims>(padding_dims)), +          mOutput(std::make_shared<Tensor>()) { +        setDatatype(DataType::Float32); +    } + +    constexpr void associateInput(const IOIndex_t inputIdx, std::shared_ptr<Data> data) override final { +        assert(inputIdx < 1 && "operator supports only 1 input"); +        (void) inputIdx; // avoid unused warning +        assert(strcmp(data->type(), Tensor::Type) == 0 && "input data must be of Tensor type"); + +        mInput = std::dynamic_pointer_cast<Tensor>(data); +    } + +    constexpr void computeOutputDims() override final { +        if (!mInput->empty()) { +            std::array<DimSize_t, DIM + 2> outputDims = {}; +            // spatial output size: floor((in - kernel + pad_begin + pad_end) / stride) + 1 +            for (std::size_t dim = 0; dim < this->template get<MaxPoolingParam::KernelDims>().size() ; ++dim) { +                outputDims[dim+2] = 1 + static_cast<DimSize_t>( +                                            std::floor(static_cast<float>(mInput->dims()[dim+2] - +                                                                    this->template get<MaxPoolingParam::KernelDims>()[dim] + +                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim] + +                                                                    this->template get<MaxPoolingParam::PaddingDims>()[dim+DIM]) / +                                            static_cast<float>(this->template get<MaxPoolingParam::StrideDims>()[dim]))); +            } +            outputDims[1] = mInput->dims()[1]; +            outputDims[0] = mInput->dims()[0]; +            mOutput->resize(outputDims); +        } +    } + +    bool outputDimsForwarded() const override final { return !(mOutput->empty()); } + + +    inline Tensor& input(const IOIndex_t inputIdx) const override final { +        assert(inputIdx == 0 && "operator supports only 1 input"); +        (void) inputIdx; // avoid unused warning +        return *(mInput.get()); +    } +    inline Tensor& output(const IOIndex_t /*outputIdx*/) const override final { return *(mOutput.get()); } + + +    inline
std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final { +        assert(inputIdx == 0 && "MaxPooling operator supports only 1 input"); +        (void) inputIdx; // avoid unused warning +        return mInput; +    } +    inline std::shared_ptr<Tensor> getOutput(const IOIndex_t outputIdx) const override final { +        assert(outputIdx == 0 && "MaxPooling operator has only 1 output"); +        (void) outputIdx; // avoid unused warning +        return mOutput; +    } + + +    std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final { +        assert(inputIdx == 0 && "operator supports only 1 input"); +        (void) inputIdx; // avoid unused warning +        return std::static_pointer_cast<Data>(mInput); +    } +    std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const override final { +        assert(outputIdx == 0 && "operator supports only 1 output"); +        (void) outputIdx; // avoid unused warning +        return std::static_pointer_cast<Data>(mOutput); +    } + + +    void setBackend(const std::string &name) { +        mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this); +        mOutput->setBackend(name); + +        // FIXME: temporary workaround +        mInput->setBackend(name); +    } + +    void setDatatype(const DataType &datatype) { +        mOutput->setDatatype(datatype); + +        // FIXME: temporary workaround +        mInput->setDatatype(datatype); +    } + +    inline IOIndex_t nbInputs() const noexcept override final { return 1; } +    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; } +    inline IOIndex_t nbOutputs() const noexcept override final { return 1; } +}; + +template <std::array<DimSize_t, 1>::size_type DIM> +inline std::shared_ptr<Node> MaxPooling(const std::array<DimSize_t, DIM> &kernel_dims, +                                           const std::string& name = "", +                                           const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), +                                           const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) { +    // FIXME: properly handle default w&b initialization in every cases +    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported"); +    auto maxPool = std::make_shared<Node>(std::make_shared<MaxPooling_Op<static_cast<DimIdx_t>(DIM)>>(kernel_dims, stride_dims, padding_dims), name); +    return maxPool; +} + +template <DimSize_t DIM> +inline std::shared_ptr<Node> MaxPooling( +    DimSize_t const (&kernel_dims)[DIM], +    const std::string& name = "", +    const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1), +    const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) { +    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported"); +    return MaxPooling(to_array(kernel_dims), name, stride_dims, padding_dims); +} +} // namespace Aidge + +namespace { +template <> +const char *const EnumStrings<Aidge::MaxPoolingParam>::data[] = {"StrideDims", "KernelDims", "PaddingDims"}; +} + +#endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */ diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp index 30e1ce2a7f664485077282405ec60ddf49513cb5..36f846ddae329be28b8e51e2bff1580a509562e1 100644 --- a/include/aidge/operator/Operator.hpp +++ b/include/aidge/operator/Operator.hpp @@ -78,6 +78,8 @@ public: */ NbElts_t getNbProducedData(const IOIndex_t outputIdx) const; + void updateConsummerProducer(); + virtual void forward(); virtual void backward();
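The MaxPooling factories above mirror AvgPooling: the kernel can be passed as a braced list (the C-array bound deduces DIM), with stride defaulting to 1 and padding to 0. A construction sketch with hypothetical node names:

```cpp
#include <memory>

#include "aidge/aidge.hpp"

int main() {
    // 2x2 max pooling; stride and padding keep their defaults (1 and 0).
    std::shared_ptr<Aidge::Node> pool1 = Aidge::MaxPooling({2, 2}, "pool1");

    // 3x3 kernel with an explicit 2x2 stride; padding keeps its default.
    std::shared_ptr<Aidge::Node> pool2 = Aidge::MaxPooling({3, 3}, "pool2", {2, 2});
    return 0;
}
```

diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp index 1f77400ce8a8ef727ea9e0a7d12477c6519ea2df..acdc69b69ab86b25a11d889980b9236e41928316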
100644 --- a/include/aidge/operator/Producer.hpp +++ b/include/aidge/operator/Producer.hpp @@ -113,32 +113,32 @@ public: }; template <std::array<DimSize_t, 1>::size_type DIM> -inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const char *name = nullptr) { +inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "") { static_assert(DIM<=MaxDim,"Too many tensor dimensions required by Producer, not supported"); return std::make_shared<Node>(std::make_shared<Producer_Op>(dims), name); } template <std::size_t DIM> -inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const char *name = nullptr) { +inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const std::string& name = "") { return Producer(to_array(dims), name); } -inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const char *name = nullptr) { +inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "") { return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor), name); } template <std::array<DimSize_t, 1>::size_type DIM> -void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const char* extension) { +void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const std::string& extension) { assert(inputIdx != gk_IODefaultIndex); static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported"); - const char* prodName = otherNode->name().empty() ? nullptr : (otherNode->name() + std::string("_") + std::string(extension)).c_str(); + const std::string prodName = (otherNode->name().empty()) ? 
"" : (otherNode->name() + std::string("_") + extension); auto prod = Producer(dims, prodName); prod->addChild(otherNode, 0, inputIdx); otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0)); } template <std::size_t DIM> -void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const char* extension) { +void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) { addProducer(otherNode, inputIdx, to_array(dims), extension); } } // namespace Aidge diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp index 3ea90462cf2b083a1a61ae39be06471093ec9f9f..141bd3ae12c7875a90d2549a24e5c141f3ff6aba 100644 --- a/include/aidge/operator/ReLU.hpp +++ b/include/aidge/operator/ReLU.hpp @@ -106,7 +106,7 @@ public: inline IOIndex_t nbOutputs() const noexcept override final { return 1; } }; -inline std::shared_ptr<Node> ReLU(const char* name = nullptr) { +inline std::shared_ptr<Node> ReLU(const std::string& name = "") { // FIXME: properly handle default w&b initialization in every cases return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name); } diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp index 93eb262f703ca7eb385641c77df7ae7e79c00b96..64e713b331bbbbf612ee5102ba0ea82fb108350e 100644 --- a/include/aidge/operator/Softmax.hpp +++ b/include/aidge/operator/Softmax.hpp @@ -106,7 +106,7 @@ public: inline IOIndex_t nbOutputs() const noexcept override final { return 1; } }; -inline std::shared_ptr<Node> Softmax(const char* name = nullptr) { +inline std::shared_ptr<Node> Softmax(const std::string& name = "") { // FIXME: properly handle default w&b initialization in every cases return std::make_shared<Node>(std::make_shared<Softmax_Op>(), name); } diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp index 81b3f31662933fe4f59a17cdb0ee42441fb791bc..9916ee2004bd1aa9f33acf96d95cae4703f692df 100644 --- a/include/aidge/scheduler/Scheduler.hpp +++ b/include/aidge/scheduler/Scheduler.hpp @@ -43,6 +43,8 @@ public: }; ~SequentialScheduler() = default; + void generateScheduling(bool verbose = false); + /** * @brief Run the provided Computational Graph with a batch of data */ @@ -54,6 +56,15 @@ public: */ void saveSchedulingDiagram(const std::string& fileName) const; + /** + * @brief Return a vector of Node ordered by the order they are called by the scheduler + * + * @return std::vector<std::shared_ptr<Node>> + */ + std::vector<std::shared_ptr<Node>> getStaticScheduling(){ + return mStaticSchedule; + } + private: /** * @brief Set of layers receiving an input from currently processing layers @@ -63,9 +74,27 @@ private: */ std::set<std::shared_ptr<Node>> getConsumers(const std::set<std::shared_ptr<Node>>& producers) const; + /** + * @brief Shared ptr to the scheduled graph view + * + */ std::shared_ptr<GraphView> mGraphView; + /** + * @brief List of SchedulingElement (i.e: Nodes with their computation time) + * + */ std::vector<SchedulingElement> mScheduling; + /** + * @brief List of nodes ordered by their + * + */ + std::vector<std::shared_ptr<Node>> mStaticSchedule; + /** + * @brief Number of computation node (i.e: nb nodes != Producer) + * + */ + std::size_t mComputationNumber = 0; // TODO: Check if not inferable from mStaticSchedule }; } // namespace Aidge -#endif /* AIDGE_SCHEDULER_H_ */ \ No newline at end of file +#endif /* AIDGE_SCHEDULER_H_ */ diff --git 
a/python_binding/graph/pybind_OpArgs.cpp b/python_binding/graph/pybind_OpArgs.cpp index 305c0b73101a97c242413ff84a5ae099764e7e77..6ea89f91945ac44f2142c5b9e8440b11ec6a1663 100644 --- a/python_binding/graph/pybind_OpArgs.cpp +++ b/python_binding/graph/pybind_OpArgs.cpp @@ -10,19 +10,20 @@ ********************************************************************************/ #include <pybind11/pybind11.h> +#include <pybind11/stl.h> + #include "aidge/graph/OpArgs.hpp" #include "aidge/graph/Node.hpp" #include "aidge/graph/GraphView.hpp" -#include <pybind11/stl.h> -#include <pybind11/complex.h> -#include <pybind11/functional.h> -#include <pybind11/chrono.h> + namespace py = pybind11; namespace Aidge { void init_OpArgs(py::module& m){ py::class_<OpArgs, std::shared_ptr<OpArgs>>(m, "OpArgs") + .def(py::init<const std::shared_ptr<GraphView>&>(), py::arg("view_")) + .def(py::init<const std::shared_ptr<Node>&>(), py::arg("node_")) .def("node", &OpArgs::node) .def("view", &OpArgs::view) ; diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp index d7099e3856d48262f0f4bbacf025f5a960a220fa..3efcf7c5345bbc835aeaf6dcbc416769b8654439 100644 --- a/python_binding/operator/pybind_Add.cpp +++ b/python_binding/operator/pybind_Add.cpp @@ -23,7 +23,7 @@ namespace Aidge { template <std::size_t NUM> void declare_Add(py::module &m) { py::class_<Add_Op<NUM>, std::shared_ptr<Add_Op<NUM>>, Operator>(m, "Add_Op", py::multiple_inheritance()); - m.def("Add", &Add<NUM>, py::arg("name") = nullptr); + m.def("Add", &Add<NUM>, py::arg("name") = ""); } void init_Add(py::module &m) { diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp index 66dadba7244a199bd4ca8a0dd814f20a8049a62f..ecbb743d33cc5750bc60aeed8e5207dcec0c23dc 100644 --- a/python_binding/operator/pybind_AvgPooling.cpp +++ b/python_binding/operator/pybind_AvgPooling.cpp @@ -37,10 +37,10 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) { py::arg("stride_dims"), py::arg("padding_dims")); - m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](std::vector<DimSize_t>& kernel_dims, - const char* name, - std::vector<DimSize_t> &stride_dims, - std::vector<DimSize_t> &padding_dims) { + m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, + const std::string& name, + const std::vector<DimSize_t> &stride_dims, + const std::vector<DimSize_t> &padding_dims) { // Lambda function wrapper because PyBind fails to convert const array. // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
if (kernel_dims.size() != DIM) { @@ -69,7 +69,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) { const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array; return AvgPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array)); }, py::arg("kernel_dims"), - py::arg("name") = nullptr, + py::arg("name") = "", py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1), py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0)); diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp index 52578c55ac0e3e1112bdbedc15bbaa3e155d9b44..70d9bce003033e1264ac39764271773fa84c760f 100644 --- a/python_binding/operator/pybind_BatchNorm.cpp +++ b/python_binding/operator/pybind_BatchNorm.cpp @@ -24,7 +24,7 @@ template <DimSize_t DIM> void declare_BatchNormOp(py::module& m) { py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, PyAbstractParametrizable>(m, ("BatchNorm_Op" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance()); - m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = nullptr); + m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = ""); } void init_BatchNorm(py::module &m) { diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp index 3cf5d818f9b6e3bdfaf9a2d0b74ec0480beb6967..7e366305f287e958ea7500695c1f3285908017b1 100644 --- a/python_binding/operator/pybind_Conv.cpp +++ b/python_binding/operator/pybind_Conv.cpp @@ -44,11 +44,11 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) { m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels, DimSize_t out_channels, - std::vector<DimSize_t>& kernel_dims, - const char* name, - std::vector<DimSize_t> &stride_dims, - std::vector<DimSize_t> &padding_dims, - std::vector<DimSize_t> &dilation_dims) { + const std::vector<DimSize_t>& kernel_dims, + const std::string& name, + const std::vector<DimSize_t> &stride_dims, + const std::vector<DimSize_t> &padding_dims, + const std::vector<DimSize_t> &dilation_dims) { // Lambda function wrapper because PyBind fails to convert const array. // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
if (kernel_dims.size() != DIM) { @@ -87,7 +87,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) { }, py::arg("in_channels"), py::arg("out_channels"), py::arg("kernel_dims"), - py::arg("name") = nullptr, + py::arg("name") = "", py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1), py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0), py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1)); diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp index b64409bdbb5f094e85cb094017a6fb837893a2db..8a81e7ba184536cbd535db24519495400bce6fdb 100644 --- a/python_binding/operator/pybind_ConvDepthWise.cpp +++ b/python_binding/operator/pybind_ConvDepthWise.cpp @@ -39,11 +39,11 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) { py::arg("padding_dims"), py::arg("dilation_dims")); - m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](std::vector<DimSize_t>& kernel_dims, - const char* name, - std::vector<DimSize_t> &stride_dims, - std::vector<DimSize_t> &padding_dims, - std::vector<DimSize_t> &dilation_dims) { + m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, + const std::string& name, + const std::vector<DimSize_t> &stride_dims, + const std::vector<DimSize_t> &padding_dims, + const std::vector<DimSize_t> &dilation_dims) { // Lambda function wrapper because PyBind fails to convert const array. // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. if (kernel_dims.size() != DIM) { @@ -80,7 +80,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) { const DimSize_t (&dilation_dims_array)[DIM] = tmp_dilation_dims_array; return ConvDepthWise<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array), to_array(dilation_dims_array)); }, py::arg("kernel_dims"), - py::arg("name") = nullptr, + py::arg("name") = "", py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1), py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0), py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1)); diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp index 82eaa0062b7db0e57da3d78d56e503e3a4beb19f..3b4137c6f208f96d256c72300437cc978658b84f 100644 --- a/python_binding/operator/pybind_FC.cpp +++ b/python_binding/operator/pybind_FC.cpp @@ -23,7 +23,7 @@ namespace Aidge { void declare_FC(py::module &m) { py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, PyAbstractParametrizable>(m, "FC_Op", py::multiple_inheritance()); - m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = nullptr); + m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = ""); } void init_FC(py::module &m) { diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp index 578d2ccd2ed143c3f9a67c0430c12aa7214cb8dc..bec59eaf2cecdc7f64d1da07580116c4b3334992 100644 --- a/python_binding/operator/pybind_GenericOperator.cpp +++ b/python_binding/operator/pybind_GenericOperator.cpp @@ -22,7 +22,7 @@ namespace Aidge { void init_GenericOperator(py::module& m) { py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, Operator>(m, "GenericOperatorOp", py::multiple_inheritance()) - .def("get_parameter_type", &GenericOperator_Op::getParameterType) + .def("get_parameter_type", &GenericOperator_Op::getParameterType) .def("get_parameters_name", 
&GenericOperator_Op::getParametersName) .def("add_parameter", &GenericOperator_Op::addParameter<bool>) .def("add_parameter", &GenericOperator_Op::addParameter<int>) @@ -34,10 +34,10 @@ void init_GenericOperator(py::module& m) { .def("add_parameter", &GenericOperator_Op::addParameter<std::vector<std::string>>) .def("get_parameter", [](GenericOperator_Op& self, std::string key) -> py::object { /* - This getParameter method returns the good python type without having to have + This getParameter method returns the good python type without having to have prior knowledge of the parameter type. */ - py::object res = py::none(); + py::object res = py::none(); std::string paramType = self.getParameterType(key); if(paramType == typeid(int).name()) res = py::cast(self.getParameter<int>(key)); @@ -62,6 +62,6 @@ void init_GenericOperator(py::module& m) { }); m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nbDataIn"), py::arg("nbIn"), py::arg("nbOut"), - py::arg("name") = nullptr); + py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp index 27a292f0baf2673f3d963f3c3b9a69892c4c6521..c062d93f5c40fe46336fe34f6d1664f24da07732 100644 --- a/python_binding/operator/pybind_LeakyReLU.cpp +++ b/python_binding/operator/pybind_LeakyReLU.cpp @@ -21,6 +21,6 @@ namespace Aidge { void init_LeakyReLU(py::module& m) { py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, PyAbstractParametrizable>(m, "LeakyReLU_Op", py::multiple_inheritance()); - m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = nullptr); + m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp index c81845ca5e5ba3674356d16db660f4e3550e9004..b6ae27289fabe1fe4dbeea60704a61373bc850cf 100644 --- a/python_binding/operator/pybind_Matmul.cpp +++ b/python_binding/operator/pybind_Matmul.cpp @@ -23,7 +23,7 @@ namespace Aidge { void declare_Matmul(py::module &m) { py::class_<Matmul_Op, std::shared_ptr<Matmul_Op>, Operator, PyAbstractParametrizable>(m, "Matmul_Op", py::multiple_inheritance()); - m.def("Matmul", &Matmul, py::arg("out_channels"), py::arg("name") = nullptr); + m.def("Matmul", &Matmul, py::arg("out_channels"), py::arg("name") = ""); } void init_Matmul(py::module &m) { diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9bd951c446e080ff27b099527ac9bbc350646140 --- /dev/null +++ b/python_binding/operator/pybind_MaxPooling.cpp @@ -0,0 +1,89 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ +#ifdef PYBIND +#include <pybind11/pybind11.h> +#include <pybind11/stl.h> + +#include <string> +#include <vector> +#include <array> + +#include "aidge/utils/Parameter.hpp" +#include "aidge/backend/OperatorImpl.hpp" +#include "aidge/operator/MaxPooling.hpp" +#include "aidge/operator/Operator.hpp" +#include "aidge/utils/Types.h" +#include "aidge/data/Tensor.hpp" + +namespace py = pybind11; +namespace Aidge { + +template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) { +  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Operator, PyAbstractParametrizable>( +    m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(), +    py::multiple_inheritance()) +  .def(py::init<const std::array<DimSize_t, DIM> &, +                const std::array<DimSize_t, DIM> &, +                const std::array<DimSize_t, (DIM<<1)> &>(), +        py::arg("kernel_dims"), +        py::arg("stride_dims"), +        py::arg("padding_dims")); + +  m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, +                                                                  const std::string& name, +                                                                  const std::vector<DimSize_t> &stride_dims, +                                                                  const std::vector<DimSize_t> &padding_dims) { +        // Lambda function wrapper because PyBind fails to convert const array. +        // So we use a vector that we convert in this function to a const DimSize_t [DIM] array. +        if (kernel_dims.size() != DIM) { +            throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]"); +        } +        if (stride_dims.size() != DIM) { +            throw std::runtime_error("stride_dims size [" + std::to_string(stride_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]"); +        } +        if (padding_dims.size() != (DIM<<1)) { +            throw std::runtime_error("padding_dims size [" + std::to_string(padding_dims.size()) + "] does not match DIM [" + std::to_string(DIM<<1) +"]"); +        } +        DimSize_t tmp_kernel_dims_array[DIM]; +        for (size_t i = 0; i < DIM; ++i) { +            tmp_kernel_dims_array[i] = kernel_dims[i]; +        } +        DimSize_t tmp_stride_dims_array[DIM]; +        for (size_t i = 0; i < DIM; ++i) { +            tmp_stride_dims_array[i] = stride_dims[i]; +        } +        DimSize_t tmp_padding_dims_array[DIM<<1]; +        for (size_t i = 0; i < (DIM<<1); ++i) { +            tmp_padding_dims_array[i] = padding_dims[i]; +        } +        const DimSize_t (&kernel_dims_array)[DIM] = tmp_kernel_dims_array; +        const DimSize_t (&stride_dims_array)[DIM] = tmp_stride_dims_array; +        const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array; +        return MaxPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array)); +    }, py::arg("kernel_dims"), +       py::arg("name") = "", +       py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1), +       py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0)); + +} + + +void init_MaxPooling(py::module &m) { +  declare_MaxPoolingOp<1>(m); +  declare_MaxPoolingOp<2>(m); +  declare_MaxPoolingOp<3>(m); + +  // FIXME: +  // m.def("MaxPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const +  // (&)[1])>(&MaxPooling)); +} +} // namespace Aidge +#endif \ No newline at end of file diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp index 5757891a30c5b40dcfa5ff99b1f06e00376f475a..ea9880800059e8993996e67138f89419c165fc4f 100644 --- a/python_binding/operator/pybind_Producer.cpp +++ b/python_binding/operator/pybind_Producer.cpp @@ -25,7 +25,7 @@ namespace Aidge { template <DimIdx_t DIM> void
declare_Producer(py::module &m) { // m.def(("Producer_" + std::to_string(DIM)+"D").c_str(), py::overload_cast<shared_ptr<Node>&>(&Producer<DIM>), py::arg("dims"), py::arg("name")); - m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const char*)>(&Producer), py::arg("dims"), py::arg("name") = nullptr); + m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const std::string&)>(&Producer), py::arg("dims"), py::arg("name") = ""); } @@ -36,7 +36,7 @@ void init_Producer(py::module &m) { "ProducerOp", py::multiple_inheritance()) .def("dims", &Producer_Op::dims); - m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const char*)>(&Producer), py::arg("tensor"), py::arg("name") = nullptr); + m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&)>(&Producer), py::arg("tensor"), py::arg("name") = ""); declare_Producer<1>(m); declare_Producer<2>(m); diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp index e0d34d5a91a4ed1fcb8507198eb222b2d02e4e26..820589d76507b39ca65ac2397614aabd1221fe3e 100644 --- a/python_binding/operator/pybind_ReLU.cpp +++ b/python_binding/operator/pybind_ReLU.cpp @@ -20,6 +20,6 @@ namespace Aidge { void init_ReLU(py::module& m) { py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, Operator>(m, "ReLU_Op", py::multiple_inheritance()); - m.def("ReLU", &ReLU, py::arg("name") = nullptr); + m.def("ReLU", &ReLU, py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp index 13ba96ade4f5c5d132274e457efa5b4edcd3dc78..72ac1107181c1d7e2f578e31a965636dbb5c111b 100644 --- a/python_binding/operator/pybind_Softmax.cpp +++ b/python_binding/operator/pybind_Softmax.cpp @@ -21,6 +21,6 @@ namespace Aidge { void init_Softmax(py::module& m) { py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Operator>(m, "Softmax_Op", py::multiple_inheritance()); - m.def("Softmax", &Softmax, py::arg("name") = nullptr); + m.def("Softmax", &Softmax, py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp index b861f881c684a2fbe800ab672299871cfc89d7ac..78418d51a5c410cb56bb8421fd7f3dc6ec6d32db 100644 --- a/python_binding/pybind_core.cpp +++ b/python_binding/pybind_core.cpp @@ -29,6 +29,7 @@ void init_FC(py::module&); void init_GenericOperator(py::module&); void init_LeakyReLU(py::module&); void init_Matmul(py::module&); +void init_MaxPooling(py::module&); void init_Producer(py::module&); void init_ReLU(py::module&); void init_Softmax(py::module&); @@ -75,6 +76,7 @@ void init_Aidge(py::module& m){ init_GenericOperator(m); init_LeakyReLU(m); init_Matmul(m); + init_MaxPooling(m); init_ReLU(m); init_Softmax(m); diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp index 2490d5c55a497223b13bceee6772c2dd44e733ef..85479d41f51e74dee4079e78a37e7f3a520639e2 100644 --- a/python_binding/scheduler/pybind_Scheduler.cpp +++ b/python_binding/scheduler/pybind_Scheduler.cpp @@ -10,6 +10,7 @@ ********************************************************************************/ #include <pybind11/pybind11.h> +#include <pybind11/stl.h> #include "aidge/scheduler/Scheduler.hpp" #include "aidge/graph/GraphView.hpp" @@ -20,6 +21,8 @@ void init_Scheduler(py::module& m){ .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view")) 
.def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false) .def("save_scheduling_diagram", &SequentialScheduler::saveSchedulingDiagram, py::arg("file_name")) + .def("generate_scheduling", &SequentialScheduler::generateScheduling, py::arg("verbose")=false) + .def("get_static_scheduling", &SequentialScheduler::getStaticScheduling) ; } } diff --git a/setup.ps1 b/setup.ps1 new file mode 100644 index 0000000000000000000000000000000000000000..61324cf4a7d64094f5ead498adf64719c3290f06 --- /dev/null +++ b/setup.ps1 @@ -0,0 +1,52 @@ +# Helper setup tool to automatically build aidge_core on Windows. + +# Requirements +################################################################################ +# You have either VS BuildTools or VS Community already present on your +# system, with the build tools installed. +# If not, download Visual Studio Community here: +# https://visualstudio.microsoft.com/fr/vs/community/ +# Make sure to install the "Desktop Development with C++" workload. +# Run this script in a Powershell console with Administrator rights in order to +# automatically install the dependencies, or just execute the second part if you +# already have all the dependencies satisfied. + +# Enable or disable automatic installation of requirements +# Run .\setup.ps1 -install_reqs:$false to disable it +param ([bool]$install_reqs=$true) + +# Default install path is .\install_cpp +if (-not $env:AIDGE_INSTALL_PATH) +{ + $env:AIDGE_INSTALL_PATH = $(Join-Path $pwd install_cpp) +} + +# 1. Setup environment +################################################################################ +if ($install_reqs) +{ + # Install Chocolatey + Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) + # Install dependencies + choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y + choco install git -Y + choco install python -Y + # Update PATH + $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User") +} + +# 2. Compile & install aidge_core +################################################################################ +mkdir -Force build_cpp +mkdir -Force $env:AIDGE_INSTALL_PATH +Set-Location build_cpp +cmake -DCMAKE_INSTALL_PREFIX:PATH=$env:AIDGE_INSTALL_PATH -DCMAKE_BUILD_TYPE=Debug .. +if(!$?) { $lastError = $LASTEXITCODE; Set-Location $PSScriptRoot; Exit $lastError } +cmake --build . -j2 +if(!$?) { $lastError = $LASTEXITCODE; Set-Location $PSScriptRoot; Exit $lastError } +cmake --install . --config Debug +if(!$?) { $lastError = $LASTEXITCODE; Set-Location $PSScriptRoot; Exit $lastError } +# Optional: run the unit tests +ctest --output-on-failure +if(!$?) 
{ $lastError = $LASTEXITCODE; Set-Location $PSScriptRoot; Exit $lastError } +Set-Location $PSScriptRoot diff --git a/src/graph/Connector.cpp b/src/graph/Connector.cpp index f189b92b24cc5529ae8fb6d8c9faac97e296a92c..cd2ceff8b58076a5054269e4676120b94c8b5beb 100644 --- a/src/graph/Connector.cpp +++ b/src/graph/Connector.cpp @@ -39,7 +39,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::generateGraph(std::vector<Connector> ct graph->add(nodesToAdd.back()); // only add, connection already done // between nodes std::vector<std::shared_ptr<Node>> parents = nodesToAdd.back()->getParents(); - std::set<std::shared_ptr<Node>> alreadyAdded = graph->getNodes(); + const std::set<std::shared_ptr<Node>>& alreadyAdded = graph->getNodes(); for (std::shared_ptr<Node> parent : parents) { if (alreadyAdded.find(parent) == alreadyAdded.end()) { buffer.push_back(parent); diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp index ad412f5b86d9cf0dee0823736548baeb7c7320a7..a0641032281c6bedb4459a0d08da1193d6375129 100644 --- a/src/graph/GraphView.cpp +++ b/src/graph/GraphView.cpp @@ -464,13 +464,13 @@ Aidge::GraphView::getChildren(const std::shared_ptr<Node> otherNode) const { std::shared_ptr<Aidge::Node> -Aidge::GraphView::getNode(const char *nodeName) const { +Aidge::GraphView::getNode(const std::string& nodeName) const { std::map<std::string, std::shared_ptr<Node>>::const_iterator it = - mNodeRegistry.find(std::string(nodeName)); + mNodeRegistry.find(nodeName); if (it != mNodeRegistry.end()) { return it->second; } else { - printf("No Node named %s in the current GraphView.\n", nodeName); + printf("No Node named %s in the current GraphView.\n", nodeName.c_str()); exit(-1); } } diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp index b3db5befbdc8299114514d8d554d439bffc5eae2..5fcc0e1139d8ccd9368eaba90231fb12370e761e 100644 --- a/src/graph/Node.cpp +++ b/src/graph/Node.cpp @@ -17,8 +17,8 @@ #include <vector> #include "aidge/utils/Types.h" -Aidge::Node::Node(std::shared_ptr<Operator> op, const char *name) - : mName((name == nullptr) ? std::string() : std::string(name)), +Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name) + : mName(name), mOperator(op), mParents(std::vector<std::shared_ptr<Node>>(static_cast<std::size_t>(op->nbInputs()), nullptr)), mChildren(std::vector<std::vector<std::weak_ptr<Node>>>(static_cast<std::size_t>(op->nbOutputs()), diff --git a/src/graph/OpArgs.cpp b/src/graph/OpArgs.cpp index f5f33fb049dec440f3bae412348c83e3427f06ce..124878fc45fe632d4a584e76a0eae6e7acfd53b9 100644 --- a/src/graph/OpArgs.cpp +++ b/src/graph/OpArgs.cpp @@ -14,13 +14,13 @@ #include "aidge/graph/OpArgs.hpp" -std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::initializer_list<OpArgs> inputs) { +std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::vector<OpArgs> inputs) { std::shared_ptr<GraphView> gv = std::make_shared<GraphView>(); for (const OpArgs& elt : inputs) { if(elt.node() != nullptr) { // >= to allow incomplete graphViews assert(static_cast<std::size_t>(elt.node()->getNbFreeDataInputs()) >= gv->outputNodes().size()); - /* + /* * /!\ mn.view()->outputNodes() is a set, order of Nodes cannot be guaranted. 
           * Prefer a functional description for detailed inputs
           */
@@ -44,7 +44,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::Sequential(std::initializer_list<OpArgs
 }
 
-std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::initializer_list<OpArgs> inputs) {
+std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::vector<OpArgs> inputs) {
    std::shared_ptr<GraphView> gv = std::make_shared<GraphView>();
    for(const OpArgs& elt : inputs) {
        if (elt.node()!=nullptr)
@@ -56,7 +56,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::Parallel(std::initializer_list<OpArgs>
 }
 
-std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::initializer_list<OpArgs> inputs) {
+std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::vector<OpArgs> inputs) {
    std::shared_ptr<GraphView> gv = Sequential(inputs);
    assert(gv->outputNodes().size() == 1U && "Zero or more than one output Node for the GraphView, don't know which one to choose from for the residual connection");
    std::shared_ptr<Node> lastNode = *gv->outputNodes().begin();
@@ -70,4 +70,4 @@ std::shared_ptr<Aidge::GraphView> Aidge::Residual(std::initializer_list<OpArgs>
    assert(lastNode->getNbFreeDataInputs()>=1);
    gv->addChild(lastNode, firstNode, 0U, gk_IODefaultIndex);
    return gv;
-}
\ No newline at end of file
+}
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index 99b07235e2917527160f03af997747f02947dcf9..b3896b12143488275b2a064819595c380da62844 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -38,6 +38,9 @@ Aidge::NbElts_t Aidge::Operator::getNbConsumedData(Aidge::IOIndex_t inputIdx) co
 Aidge::NbElts_t Aidge::Operator::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
     return mImpl->getNbProducedData(outputIdx);
 }
+void Aidge::Operator::updateConsummerProducer(){
+    mImpl->updateConsummerProducer();
+}
 void Aidge::Operator::forward() { mImpl->forward(); }
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index fce46397ffd286a2ddbe254752b241578415e3d8..dc0768d2b6f7a1dd46fc0a8523b950011f7dcf5d 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -20,7 +20,7 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
-void drawProgressBar(double progress, int barWidth, const char* additionalInfo = nullptr) {
+void drawProgressBar(double progress, int barWidth, const std::string& additionalInfo = "") {
     putchar('[');
     int pos = static_cast<int>(barWidth * progress);
     for (int i = 0; i < barWidth; ++i) {
@@ -29,30 +29,23 @@ void drawProgressBar(double progress, int barWidth, const char* additionalInfo =
         else
             putchar(' ');
     }
-    printf("] %d%% | %s\r", static_cast<int>(progress * 100), (additionalInfo ? additionalInfo : ""));
+    printf("] %d%% | %s\r", static_cast<int>(progress * 100), additionalInfo.c_str());
     fflush(stdout);
 }
 
-// TODO: handle multiple inputs/outputs
-void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
-    if (frowardDims) {mGraphView->forwardDims(); }
-
-    mScheduling.clear();
-
+void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
     // setup initial producers list
-    // add each Producer Node.
-    std::set<std::shared_ptr<Node>> computationOver;
-    std::size_t computationNumber = 0;
+    mComputationNumber = 0;
     std::set<std::shared_ptr<Node>> producers;
     for (const std::shared_ptr<Node>& nodePtr : mGraphView->getNodes()) {
         if (nodePtr->type() == "Producer") {
             producers.insert(nodePtr);
         } else {
-            ++computationNumber;
+            ++mComputationNumber;
         }
     }
     // add Data Input
-    // FIXME : shoudl be changed when the real system for providing
+    // FIXME : should be changed when the real system for providing
     // data is implemented
     for (const std::shared_ptr<Node>& nodePtr : mGraphView->inputNodes()) {
         for (const auto& parentPtr : nodePtr->getParents()) {
@@ -112,22 +105,10 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
             }
         }
 
-        // run sequencially every runnable consumers once
-        // TODO: handle memory allocation in scheduler
-        // TODO: optimize memory usage
+        // Push consumers into the list of nodes to run and update the consumer/producer system
         for (const auto& runnable : runnableConsumers) {
-            if (verbose)
-                printf("run: %s\n",
-                       (runnable->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))).c_str());
-            else
-                drawProgressBar(static_cast<float>(computationOver.size()) / static_cast<float>(computationNumber), 50,
-                                (std::string("running ") + runnable->type() + "_" +
-                                 std::to_string(reinterpret_cast<uintptr_t>(runnable.get())))
-                                    .c_str());
-            const auto tStart = std::chrono::high_resolution_clock::now();
-            runnable->forward();
-            const auto tEnd = std::chrono::high_resolution_clock::now();
-            mScheduling.push_back(SchedulingElement(runnable, tStart, tEnd));
+            runnable->getOperator()->updateConsummerProducer();
+            mStaticSchedule.push_back(runnable);
         }
 
         // update producers and consumers list
@@ -165,18 +146,6 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
             }
         }
 
-        bool computationOverForConsumer = true;
-        for (IOIndex_t parentIDi = 0; parentIDi < consumer->nbInputs(); ++parentIDi) {
-            if (consumer->getOperator()->getNbConsumedData(parentIDi) <
-                consumer->getOperator()->getNbRequiredData(parentIDi)) {
-                computationOverForConsumer = false;
-                break;
-            }
-        }
-        if (computationOverForConsumer) {
-            computationOver.insert(consumer);
-        }
-
         for (IOIndex_t outId = 0; outId < consumer->nbOutputs(); ++outId) {
             if (consumer->getOperator()->getNbProducedData(outId) > 0) {
                 if (verbose) printf("  also producer\n");
@@ -198,8 +167,52 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
         if (verbose) printf("*************\n");
     } while (!consumers.empty());
+
+}
+
+// TODO: handle multiple inputs/outputs
+void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
+    if (forwardDims) { mGraphView->forwardDims(); }
+
+    // track the Nodes whose computation is over (used for the progress bar)
+    std::set<std::shared_ptr<Node>> computationOver;
+
+    mScheduling.clear();
+
+    this->generateScheduling();
+
+    // TODO: for loop on the list of nodes to run
+    // run sequentially every runnable consumer once
+    // TODO: handle memory allocation in scheduler
+    // TODO: optimize memory usage
+    for (const auto& runnable : mStaticSchedule) {
+        bool computationOverForConsumer = true;
+        for (IOIndex_t parentIDi = 0; parentIDi < runnable->nbInputs(); ++parentIDi) {
+            if (runnable->getOperator()->getNbConsumedData(parentIDi) <
+                runnable->getOperator()->getNbRequiredData(parentIDi)) {
+                computationOverForConsumer = false;
+                break;
+            }
+        }
+        if (computationOverForConsumer) {
+            computationOver.insert(runnable);
+        }
+
+        if (verbose)
+            printf("run: %s\n",
+                   (runnable->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))).c_str());
+        else
+            drawProgressBar(static_cast<float>(computationOver.size()) / static_cast<float>(mComputationNumber), 50,
+                            (std::string("running ") + runnable->type() + "_" +
+                             std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))));
+        const auto tStart = std::chrono::high_resolution_clock::now();
+        runnable->forward();
+        const auto tEnd = std::chrono::high_resolution_clock::now();
+        mScheduling.push_back(SchedulingElement(runnable, tStart, tEnd));
+    }
     if (!verbose) drawProgressBar(1.0, 50, " ");
     printf("\n");
+
 }
 
 void Aidge::SequentialScheduler::saveSchedulingDiagram(const std::string& fileName) const {
@@ -232,4 +245,4 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::SequentialScheduler::getConsumers(
     }
     return consumers;
-}
\ No newline at end of file
+}
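
Note on the `const char*` to `const std::string&` changes in GraphView.cpp and Node.cpp above: node names and lookups now compose directly with `std::string` values, while string literals keep working through the implicit conversion. A minimal sketch of a call site under that assumption (the `nthConv` helper and the "conv" naming scheme are hypothetical, not part of this patch):

```cpp
#include <memory>
#include <string>

#include "aidge/graph/GraphView.hpp"

// Hypothetical helper: fetch the i-th convolution by its runtime-built name.
// getNode() prints an error message and exits if the name is not registered.
std::shared_ptr<Aidge::Node> nthConv(const Aidge::GraphView& gv, int i) {
    const std::string name = "conv" + std::to_string(i); // no c_str() round-trip needed
    return gv.getNode(name);
}
```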
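Switching `Sequential`, `Parallel` and `Residual` from `std::initializer_list<OpArgs>` to `std::vector<OpArgs>` keeps existing brace-enclosed calls compiling (a braced list converts to a vector) and additionally allows graphs whose length is only known at runtime, which `initializer_list` could not express; it is also what lets `Residual` forward its `inputs` straight to `Sequential` without rebuilding a literal list. A sketch under those assumptions; `makeStage` is a hypothetical node factory standing in for any real Aidge operator constructor:

```cpp
#include <cstddef>
#include <memory>
#include <vector>

#include "aidge/graph/OpArgs.hpp"

std::shared_ptr<Aidge::Node> makeStage(std::size_t i); // hypothetical factory, not in the patch

std::shared_ptr<Aidge::GraphView> buildChain(std::size_t depth) {
    std::vector<Aidge::OpArgs> stages;
    stages.reserve(depth);
    for (std::size_t i = 0; i < depth; ++i) {
        stages.emplace_back(makeStage(i)); // OpArgs wraps a Node (or a GraphView)
    }
    return Aidge::Sequential(stages); // runtime-sized: impossible with initializer_list
}
```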
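Scheduler.cpp now separates schedule generation from execution: `generateScheduling` fills `mStaticSchedule` (calling `updateConsummerProducer` on each operator as it goes), and `forward` replays that static schedule while recording per-node timings in `mScheduling`. A sketch of the resulting flow, assuming the usual `SequentialScheduler` constructor taking the `GraphView` and that `getStaticScheduling` returns the ordered list of scheduled nodes:

```cpp
#include <cstdio>
#include <memory>

#include "aidge/scheduler/Scheduler.hpp"

void runGraph(const std::shared_ptr<Aidge::GraphView>& gv) {
    Aidge::SequentialScheduler scheduler(gv);

    // Dry run: compute the static ordering without executing any operator.
    scheduler.generateScheduling();
    for (const auto& node : scheduler.getStaticScheduling()) {
        std::printf("scheduled: %s\n", node->type().c_str());
    }

    // Execute: forward() regenerates the schedule internally, then runs it
    // node by node, timing each forward() call for the scheduling diagram.
    scheduler.forward(/*forwardDims=*/true, /*verbose=*/false);
    scheduler.saveSchedulingDiagram("scheduling");
}
```

The same split is what the new `generate_scheduling` / `get_static_scheduling` pybind entries at the top of this patch expose to Python.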