diff --git a/docs/conf.py b/docs/conf.py
index 87c1fd165c86d43782974ecf52894b3ee1b1d844..91d98f515ca6c3553a81b2bc3c1954bdb0e15a08 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -51,6 +51,7 @@ extensions = [
     'sphinx.ext.todo',
     'sphinx.ext.autodoc',
     'sphinx.ext.autosummary',
+    'sphinx.ext.autosectionlabel',
     'sphinx.ext.doctest',
     'sphinx.ext.coverage',
     'sphinx.ext.mathjax',
@@ -65,6 +66,10 @@ extensions = [
     'sphinx_copybutton'
 ]
 
+# Make sure the autosectionlabel targets are unique
+autosectionlabel_prefix_document = True
+autosectionlabel_maxdepth = 4
+
 graphviz_output_format = 'svg'
 
 # Mermaid Configuration
@@ -157,7 +162,7 @@ html_theme = 'pydata_sphinx_theme'
 html_theme_options = {
     # 'logo_only': True,
     # 'style_nav_header_background': '#F5F5F5',
-    "show_toc_level": 1,
+    "show_toc_level": 2,
     "show_nav_level": 2,
     "navigation_depth": 2,
     "navbar_align": "content",
diff --git a/docs/source/API/Cpp/index.rst b/docs/source/API/Cpp/index.rst
index e8f0e7bef13b7e77df5231ceb72c01f6e957363a..80be0326635fcdebf80dccc7cd00865d3604a4fb 100644
--- a/docs/source/API/Cpp/index.rst
+++ b/docs/source/API/Cpp/index.rst
@@ -4,5 +4,4 @@ Aidge C++ API
 
 .. toctree::
 
-   data.rst
-   op.md
\ No newline at end of file
+   data.rst
\ No newline at end of file
diff --git a/docs/source/API/index.rst b/docs/source/API/index.rst
index 511bb6efd2ee9315fc62be26485e6238d6ab5d89..24d2d71e855fbb811ac595c80cf8d81c73cd87d9 100644
--- a/docs/source/API/index.rst
+++ b/docs/source/API/index.rst
@@ -1,5 +1,5 @@
-Aidge API
-=========
+API
+===
 
 .. toctree::
 
diff --git a/docs/source/UserGuide/GetStarted/index.rst b/docs/source/GetStarted/index.rst
similarity index 100%
rename from docs/source/UserGuide/GetStarted/index.rst
rename to docs/source/GetStarted/index.rst
diff --git a/docs/source/UserGuide/GetStarted/install.rst b/docs/source/GetStarted/install.rst
similarity index 100%
rename from docs/source/UserGuide/GetStarted/install.rst
rename to docs/source/GetStarted/install.rst
diff --git a/docs/source/UserGuide/GetStarted/quickStart.rst b/docs/source/GetStarted/quickStart.rst
similarity index 74%
rename from docs/source/UserGuide/GetStarted/quickStart.rst
rename to docs/source/GetStarted/quickStart.rst
index 6e61cbeb62f2f4d5514906b57e663218967832d4..bea3d8f5da02e08a7c0c07900b7c4a393b94884d 100644
--- a/docs/source/UserGuide/GetStarted/quickStart.rst
+++ b/docs/source/GetStarted/quickStart.rst
@@ -2,9 +2,9 @@ Quick Start
 ===========
 
-Fake example of quck start TODO fill this section ...
-
+Fake example of quick start TODO fill this section ...
+First we need to create a :ref:`Tensor <source/userguide/data:tensor>`.
 
 .. tab-set::
diff --git a/docs/source/UserGuide/FrameworkPresentation/index.rst b/docs/source/UserGuide/FrameworkPresentation/index.rst
deleted file mode 100644
index 07f0dabebe4c0eb4b6b0c58d529f839746a7ebaf..0000000000000000000000000000000000000000
--- a/docs/source/UserGuide/FrameworkPresentation/index.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-Overview of the Aidge Framework
-===============================
-
-Context
--------
-
diff --git a/docs/source/UserGuide/architecture.rst b/docs/source/UserGuide/architecture.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e3ce0bf8d4be6622e3ec80a3812b94711f99c7c6
--- /dev/null
+++ b/docs/source/UserGuide/architecture.rst
@@ -0,0 +1,42 @@
+Framework architecture
+======================
+
+
+AIDGE is built on a modular architecture that can be extended through plugins, so that functionality can be added to meet needs not anticipated in the initial design. This keeps the platform open-ended in what it can offer, while taking care to maintain its performance.
+
+Core
+----
+
+The Core module is developed entirely in C++ (C++14) and provides a set of functions making it possible to:
+
+- Create a computational graph to model a DNN (see the sketch after this list);
+- Modify the computational graph (e.g. by deleting or replacing a node of the graph);
+- Do graph matching to find specific sequences of operators in the computational graph;
+- Instantiate standard operators (without implementation);
+- Instantiate standard data structures, such as Tensor (without implementation);
+- Create standard schedulers (sequential) to execute the computational graph;
+- Access standard graph optimization functionalities, such as fusion of operators:
+
+  - FuseMulAdd: fuse MatMul and Add operators into a FullyConnected operator
+  - FuseConvBatchNorm: fuse BatchNorm into a Convolution operator
+
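+For illustration, a minimal sketch of these functions through the Python binding could look as follows
+(the factory helpers shown, such as ``aidge_core.sequential`` and ``aidge_core.FC``, are assumptions and may differ in the installed version):
+
+.. code-block:: python
+
+    import aidge_core
+
+    # Create a computational graph modelling a small MLP
+    # (factory names and signatures are illustrative assumptions).
+    model = aidge_core.sequential([
+        aidge_core.FC(784, 128, name="fc1"),
+        aidge_core.ReLU(name="relu1"),
+        aidge_core.FC(128, 10, name="fc2"),
+    ])
+
+    # Inspect the nodes of the resulting graph.
+    for node in model.get_nodes():
+        print(node.name(), node.type())
+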
+.. TODO Describe CPU plugin existence
\ No newline at end of file
diff --git a/docs/source/UserGuide/benchmark.rst b/docs/source/UserGuide/benchmark.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a7ce8a6e5e5083e5ee1306c28838e96f2cfbe528
--- /dev/null
+++ b/docs/source/UserGuide/benchmark.rst
@@ -0,0 +1,3 @@
+Benchmark neural network model
+==============================
+
diff --git a/docs/source/UserGuide/data.rst b/docs/source/UserGuide/data.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7f3757520ff914430bea64af07e8c8dde10a1fd8
--- /dev/null
+++ b/docs/source/UserGuide/data.rst
@@ -0,0 +1,42 @@
+Data
+====
+
+Tensor
+------
+
+A Tensor is a multi-dimensional array defined by its dimensions, its datatype and its precision.
+
+A Tensor can be used to represent:
+
+- A raw input of a DNN, such as an image or a time series;
+- A label associated with a raw input;
+- A processed input or label computed through a DNN;
+- A parameter of a DNN, such as a weight.
+
+AIDGE can define Tensors with the following datatypes and precisions:
+
+.. list-table::
+   :header-rows: 1
+
+   * - Datatype
+     - Precision
+   * - Float
+     - 64, 32, 16
+   * - Int
+     - 64, 32, 16, 8, 7, 6, 5, 4, 3, 2, 1
+   * - UInt
+     - 64, 32, 16, 8, 7, 6, 5, 4, 3, 2, 1
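+
+As an illustration, the following sketch creates a Tensor through the Python binding
+(the ``aidge_core.Tensor`` constructor from a NumPy array and the ``dims`` accessor shown here are assumptions and may differ in the installed version):
+
+.. code-block:: python
+
+    import numpy as np
+    import aidge_core
+
+    # Wrap a 2x3 array of 32-bit floats into an Aidge Tensor (assumed constructor).
+    values = np.arange(6, dtype=np.float32).reshape(2, 3)
+    tensor = aidge_core.Tensor(values)
+
+    print(tensor.dims())  # expected: [2, 3]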
diff --git a/docs/source/UserGuide/ensureRobustness.rst b/docs/source/UserGuide/ensureRobustness.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e5d443c0a85fb7f4a34d31fb6825980d78d96230
--- /dev/null
+++ b/docs/source/UserGuide/ensureRobustness.rst
@@ -0,0 +1,3 @@
+Ensure robustness
+=================
+
diff --git a/docs/source/UserGuide/executeGraph.rst b/docs/source/UserGuide/executeGraph.rst
new file mode 100644
index 0000000000000000000000000000000000000000..93a286b05c8c56f59f5ce613074c9dd87da0957b
--- /dev/null
+++ b/docs/source/UserGuide/executeGraph.rst
@@ -0,0 +1,2 @@
+Runtime graph execution
+=======================
\ No newline at end of file
diff --git a/docs/source/UserGuide/export.rst b/docs/source/UserGuide/export.rst
new file mode 100644
index 0000000000000000000000000000000000000000..47f890b855d4a01701ab96a539a1b895a79900cb
--- /dev/null
+++ b/docs/source/UserGuide/export.rst
@@ -0,0 +1,3 @@
+Perform an export
+=================
+
diff --git a/docs/source/UserGuide/index.rst b/docs/source/UserGuide/index.rst
index 0657a73a36a8cd0c47b677922cc2e1ba43ddaf26..0a29e2612a361810a2e645034c346354b27238c1 100644
--- a/docs/source/UserGuide/index.rst
+++ b/docs/source/UserGuide/index.rst
@@ -1,15 +1,65 @@
 User Guide
 ==========
 
-.. toctree::
-   :maxdepth: 1
-   :caption: Framework Presentation
+Workflow overview
+-----------------
+
+AIDGE allows designing and deploying Deep Neural Networks (DNNs) on embedded systems.
+The design and deployment stages are as follows:
+
+.. TODO : insert workflow fig
+
+High-level functionalities
+--------------------------
 
-   ./FrameworkPresentation/index.rst
+AIDGE offers functionalities that can be categorized as follows:
 
+- :ref:`Load and store model <source/userguide/loadStoreModel:Load and store model>`: functions used to load or store a graph model from/to a serialized format.
+- **Model graph:** functions used to model a graph, such as adding an operator.
+- **Transform model:** functions used for manipulating the graph, such as graph duplication.
+- **Provide data:** functions used to provide the data needed to execute a graph. These functions must be runnable on device.
+- **Generate graph:** generate kernels and scheduling for a specific target, on an already optimized graph. There is no graph manipulation here: the graph is assumed to be already prepared (quantization, tiling, operator mapping…) for the intended target. For graph preparation and optimization, see **Optimize hardware mapping**.
+- **Static analysis of KPI:** functions for obtaining statistics on the graph, such as the number of parameters or operations, that can be computed without having to execute the graph.
+- **Execute graph:** execute a graph, either using a backend library (a simple implementation change, with no generation or compilation involved) or using compiled operator implementations, in which case the graph first goes through the Compile graph function (see the sketch after this list).
+- **Learn model:** training a model requires several functions of the workflow (Model graph, Provide data, Execute graph, Benchmark KPI).
+- **Benchmark KPI:** any kind of benchmarking that requires running the network on a target in order to perform a measurement: accuracy, execution time… Some of these functions must be runnable on device.
+- **Model Hardware:** functions to represent the hardware target.
+- **Optimize hardware mapping:** high-level functions to optimize the hardware mapping: quantization, pruning, tiling…
+- **Learn on edge:** high-level functions to condition the graph for edge learning, including continual learning and federated learning.
+- **Ensure robustness:** high-level functions to condition the graph for robustness.
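+
+A minimal end-to-end sketch combining some of these functions through the Python bindings could look as follows
+(module and function names such as ``aidge_onnx.load_onnx`` and ``aidge_core.SequentialScheduler`` are assumptions and may differ in the installed version; ``"model.onnx"`` is a placeholder path):
+
+.. code-block:: python
+
+    import aidge_core
+    import aidge_backend_cpu  # importing a backend is assumed to register its kernel implementations
+    import aidge_onnx
+
+    # Load and store model: import an ONNX graph.
+    model = aidge_onnx.load_onnx("model.onnx")
+
+    # Execute graph: select the CPU backend and run a sequential scheduler
+    # (providing input data is omitted in this sketch).
+    model.set_backend("cpu")
+    scheduler = aidge_core.SequentialScheduler(model)
+    scheduler.forward()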
 
 .. toctree::
-   :maxdepth: 1
-   :caption: Get Started
+   :hidden:
+   :maxdepth: 2
 
-   ./GetStarted/index.rst
\ No newline at end of file
+   architecture.rst
+   data.rst
+   modelGraph.rst
+   loadStoreModel.rst
+   interoperability.rst
+   transformGraph.rst
+   staticAnalysis.rst
+   executeGraph.rst
+   benchmark.rst
+   ensureRobustness.rst
+   optimizeGraph.rst
+   export.rst
+   learnEdge.rst
diff --git a/docs/source/UserGuide/interoperability.rst b/docs/source/UserGuide/interoperability.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b766f42a8a73043cd0194877fb9ac59f731e19f7
--- /dev/null
+++ b/docs/source/UserGuide/interoperability.rst
@@ -0,0 +1,2 @@
+Platform interoperability
+=========================
\ No newline at end of file
diff --git a/docs/source/UserGuide/learnEdge.rst b/docs/source/UserGuide/learnEdge.rst
new file mode 100644
index 0000000000000000000000000000000000000000..46f83d1453bed2da63990775ccb5f9b505dba580
--- /dev/null
+++ b/docs/source/UserGuide/learnEdge.rst
@@ -0,0 +1,4 @@
+Learn on edge
+=============
+
+Coming soon ...
\ No newline at end of file
diff --git a/docs/source/UserGuide/loadStoreModel.rst b/docs/source/UserGuide/loadStoreModel.rst
new file mode 100644
index 0000000000000000000000000000000000000000..406e4d3d182b37ad6a3f878cbda9241cfc505ba2
--- /dev/null
+++ b/docs/source/UserGuide/loadStoreModel.rst
@@ -0,0 +1,2 @@
+Load and store model
+====================
\ No newline at end of file
diff --git a/docs/source/UserGuide/modelGraph.rst b/docs/source/UserGuide/modelGraph.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c8889bc712de44252d1d3c8f124bbddc0f04690d
--- /dev/null
+++ b/docs/source/UserGuide/modelGraph.rst
@@ -0,0 +1,2 @@
+Computational graph
+===================
diff --git a/docs/source/UserGuide/optimizeGraph.rst b/docs/source/UserGuide/optimizeGraph.rst
new file mode 100644
index 0000000000000000000000000000000000000000..787db7416379d44b391eac7c29cb7c1e29a74c8c
--- /dev/null
+++ b/docs/source/UserGuide/optimizeGraph.rst
@@ -0,0 +1,2 @@
+Optimize graph
+==============
\ No newline at end of file
diff --git a/docs/source/UserGuide/staticAnalysis.rst b/docs/source/UserGuide/staticAnalysis.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a7a5a4d1cc7d06e1c074d0678abef4fd6b99ae86
--- /dev/null
+++ b/docs/source/UserGuide/staticAnalysis.rst
@@ -0,0 +1,2 @@
+Static analysis
+===============
\ No newline at end of file
diff --git a/docs/source/UserGuide/transformGraph.rst b/docs/source/UserGuide/transformGraph.rst
new file mode 100644
index 0000000000000000000000000000000000000000..289eb67d34c75a4ccd4f4e827657a70052f8d799
--- /dev/null
+++ b/docs/source/UserGuide/transformGraph.rst
@@ -0,0 +1,3 @@
+Transform graph
+===============
+
diff --git a/docs/source/index.rst b/docs/source/index.rst
index d3105edf904b9132eada4cbbfb4e4690d8fde677..36428512d10c63e662ded9b24077057f98caf4c4 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -4,8 +4,7 @@ Aidge
 :Release: |version|
 :Date: |today|
 
-Aidge is a `CEA LIST <https://list.cea.fr/en/>`_'s deep learning library
-optimized for export and processing on embedded devices.
+Aidge is a deep learning library optimized for export and processing on embedded devices.
 
 It allows to create or import a Computational Graph from common Frameworks,
 apply editing on its structure, train it and export its architecture on many
@@ -17,19 +16,19 @@ well as training and many custom functionalities for the target device.
    :gutter: 1
 
    .. grid-item-card:: :octicon:`desktop-download` Install
-      :link: UserGuide/GetStarted/install
+      :link: GetStarted/install
       :link-type: doc
 
       Find your configuration and requirements.
 
   .. grid-item-card:: :octicon:`table` Quick Start
-      :link: UserGuide/GetStarted/quickStart
+      :link: GetStarted/quickStart
       :link-type: doc
 
      Build, train and deploy your first network.
 
-   .. grid-item-card:: :octicon:`checklist` General Presentation
-      :link: UserGuide/FrameworkPresentation/index
+   .. grid-item-card:: :octicon:`checklist` User Guide
+      :link: UserGuide/index
       :link-type: doc
 
      The main hub for detailed usage explanations.
@@ -55,22 +54,17 @@ well as training and many custom functionalities for the target device.
 What is Aidge ?
 ---------------
 
-Aidge is a direct successor of `N2D2 <https://n2d2.readthedocs.io/en/latest/intro/intro.html>`_,
-a solution for fast and accurate Deep Neural Network (DNN) simulation Framework as well
-as full and automated DNN-based applications building.
+AIDGE is an open source deep learning platform specialized in the design of deep neural networks intended to operate in systems constrained by power consumption or dissipation, latency, form factor (dimensions, size, etc.), and/or cost criteria.
 
-Aidge keeps the numerous embedded supports that made the strength of N2D2 while
-improving or adding many functionalities, among which:
+AIDGE offers:
 
-- A more userfriendly API and documentation.
-- | An simpler structure for both the compuational graph and the code,
-  | making it easier to understand and edit the functions
-- A highly customizable plugin system with templates and tutorials to follow
-- A light core library
-- World-leading, device-dependant, graph optimization functionalities
-- Runtime execution on the target device with any of the core functionalities exportable
-- Hand on the Scheduler
-- Possibility to run a single DNN on heterogeneous targets
+- an integrated approach encompassing the entire design flow, from application development to deployment: data formatting, neural network exploration, learning, testing, and optimized code generation,
+- several functions to reduce the computational complexity of models and their memory requirements, most often using quantization (during or after training) and topological optimization techniques,
+- compatibility with a wide range of commercially available hardware targets, offering optimized implementations for MCUs and DSPs, GPUs, FPGAs or NPUs,
+- a modular design and a simple abstraction layer, so that features can be added and modified with ease, including the low-level implementation of computation functions depending on the specific characteristics of the hardware being targeted (approximate computing, specific saturated arithmetic, etc.),
+- a high degree of interoperability, with support for the ONNX standard and integration with the PyTorch and Keras platforms,
+- a multiparadigm approach that integrates the simulation of neuromorphic neural network models into the same platform,
+- sovereignty and control of the code, as AIDGE is totally independent of other deep learning platforms.
 
 Licence
 -------
@@ -84,5 +78,6 @@ Aidge is released under the Eclipse Public License 2.0
    :hidden:
 
    UserGuide/index.rst
+   GetStarted/index.rst
    API/index.rst
    Tutorial/index.rst