Commit ef1e8e2e, authored 1 year ago by Vincent Templier

    Integrate memory manager support

Parent: 7ec94ab1
No related branches or tags found. Part of 2 merge requests: !17 (v0.1.0) and !12 (v0.4.0).

Showing 2 changed files, with 89 additions and 11 deletions:

  aidge_export_arm_cortexm/memory.py (+86, −8)
  aidge_export_arm_cortexm/templates/memory/mem_info.jinja (+3, −3)
File: aidge_export_arm_cortexm/memory.py (+86, −8)
+import os
+import shutil
+from typing import List
+from pathlib import Path
 import aidge_core
 import aidge_backend_cpu
-from typing import List

-# for each layer, name: [size, offset start]
+# for each layer,
+# name [size, stride, length, count, contiguous offset, contiguous size, wrapping offset, wrapping size]
+# true values [nb_outputs, nb_outputs, width, width, offset start, total size, 0, 0]
+# Example:
+#define ENV_MEM_SIZE 3
+#define ENV_MEM_STRIDE 3
+#define ENV_MEM_LENGTH 224
+#define ENV_MEM_COUNT 224
+#define ENV_MEM_CONT_OFFSET 0
+#define ENV_MEM_CONT_SIZE 150528
+#define ENV_MEM_WRAP_OFFSET 0
+#define ENV_MEM_WRAP_SIZE 0
+MEMORY_INFO_TEMPLATE = ["layer_name", "size", "stride", "length", "count",
+                        "cont_offset", "cont_size", "wrap_offset", "wrap_size"]
+
+# for each layer, name: [size, offset start] (old style)
 # Example:
 #define ENV_MEM_SIZE 3
 #define ENV_OFFSET 0
-MEMORY_INFO_TEMPLATE = ["layer_name", "size", "offset"]
+# MEMORY_INFO_TEMPLATE = ["layer_name", "size", "offset"]

 # Default memory management, which can be used for development
...
@@ -25,8 +42,9 @@ def compute_default_mem_info(scheduler: aidge_core.Scheduler):
         for dim in dims:
             mem *= dim
-        # Add memeory info
-        mem_info.append([node.name(), mem, mem_size])
+        # Add memory info
+        # Only size and cont_offset matter
+        mem_info.append([node.name(), mem, 0, 0, 0, mem_size, mem, 0, 0])
         # Increment offset for the next layer
         mem_size += mem
...
@@ -34,17 +52,77 @@ def compute_default_mem_info(scheduler: aidge_core.Scheduler):
     return mem_size, mem_info

-def generate_optimized_memory_info(scheduler: aidge_core.Scheduler,
+def generate_optimized_memory_info(stats_folder: Path,
+                                   scheduler: aidge_core.Scheduler,
                                    wrapping: bool = False):
     # The forward dims have to be computed outside this function
+    # The scheduling is also assumed to have been generated outside;
+    # otherwise, uncomment the following line
+    # scheduler.generate_scheduling()

     # Generate the memory manager
+    # So far, the Producers are not taken into account by the memory manager => inc_producers=False
     mem_manager = scheduler.generate_memory(inc_producers=False, wrap_around_buffer=wrapping)
-    mem_size = 0
+    # In the export, we currently use a unified memory buffer whose size
+    # is determined by the memory peak usage
+    mem_size = mem_manager.get_peak_usage()
     mem_info = []

+    # Parse the memory manager planes
+    mem_planes = mem_manager.get_planes()
+    for node in scheduler.get_static_scheduling():
+        # Skip memory management for the parameter producers
+        if node.type() == "Producer":
+            if node.get_operator().get_attr("Constant"):
+                continue
+            else:
+                # Input memory management (assumes the tensor dims end with [:, channel, height, width])
+                tensor = node.get_operator().get_output(0)
+                if tensor is None:
+                    raise RuntimeError("Input producer not provided")
+                if len(tensor.dims()) < 3:
+                    raise RuntimeError("Input producer dimensions must end with [:, channel, height, width]")
+
+                name = node.name()
+                size = tensor.dims()[-3]    # Should be nb_channels
+                stride = tensor.dims()[-3]  # Should be nb_channels
+                length = tensor.dims()[-1]  # Should be width
+                count = tensor.dims()[-2]   # Should be height
+                # The input data is assumed to be stored outside the export,
+                # so its memory offset does not matter here
+                cont_offset = 0
+                cont_size = tensor.dims()[-1] * tensor.dims()[-2] * tensor.dims()[-3]  # Size of the input
+                wrap_offset = 0  # No wrapping
+                wrap_size = 0    # No wrapping
+        else:
+            plane = mem_planes[node][0]
+            name = node.name()
+            size = plane.size
+            stride = plane.stride
+            length = plane.length
+            count = plane.count
+            cont_offset = plane.get_contiguous_offset()
+            cont_size = plane.get_contiguous_size()
+            wrap_offset = plane.get_wrapped_offset()
+            wrap_size = plane.get_wrapped_size()
+
+        mem_info.append([name, size, stride, length, count,
+                         cont_offset, cont_size, wrap_offset, wrap_size])
+
+    # Use gnuplot to generate the memory layout plot
+    try:
+        os.makedirs(str(stats_folder / "graph"), exist_ok=True)
+        mem_manager.log("memory_info")
+        os.chmod("memory_info_plot.gnu", 0o777)
+        os.system("./memory_info_plot.gnu")
+        shutil.move("memory_info", str(stats_folder / "graph" / "memory_info"))
+        shutil.move("memory_info_plot.png", str(stats_folder / "graph" / "memory_info_plot.png"))
+        os.remove("memory_info_plot.gnu")
+    except OSError:
+        print("Please install gnuplot if you want the memory plot from MemoryManager.")
+
     return mem_size, mem_info
\ No newline at end of file
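As the comments in the diff stress, both forward-dimension propagation and scheduling must happen before generate_optimized_memory_info is called, and the new stats_folder argument tells the function where to move the gnuplot output. A minimal sketch of the intended call sequence follows; the export_memory_layout helper is hypothetical, and the aidge_core entry points used (GraphView.forward_dims, SequentialScheduler, generate_scheduling) are the usual ones, though exact names may differ between aidge versions:

from pathlib import Path

import aidge_core
from aidge_export_arm_cortexm.memory import generate_optimized_memory_info

def export_memory_layout(model: aidge_core.GraphView, stats_folder: Path):
    # Propagate tensor dimensions first: generate_optimized_memory_info
    # expects forward_dims() to have been called already
    model.forward_dims()
    # Likewise, the static scheduling must exist before the call
    scheduler = aidge_core.SequentialScheduler(model)
    scheduler.generate_scheduling()
    # Returns the peak size of the unified buffer and one row per node:
    # [name, size, stride, length, count, cont_offset, cont_size,
    #  wrap_offset, wrap_size], matching MEMORY_INFO_TEMPLATE
    return generate_optimized_memory_info(stats_folder=stats_folder,
                                          scheduler=scheduler,
                                          wrapping=False)

With wrapping=True, the memory manager may let a buffer run past the end of its reserved space and wrap back to the beginning: as the field names suggest, the contiguous part is described by cont_offset/cont_size and the wrapped remainder by wrap_offset/wrap_size, which is what the four extra fields in MEMORY_INFO_TEMPLATE encode.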
File: aidge_export_arm_cortexm/templates/memory/mem_info.jinja (+3, −3)
 {#- For name header -#}
 #ifndef MEM_INFO_H
 #define MEM_INFO_H

 #define MEMORY_SIZE {{ mem_size }}
+#define MEMORY_ALIGNMENT {{ mem_alignment }}
 {% for i in range(mem_info|length) -%}
 {%- set layer_name = mem_info[i][0] %}
 /* {{layer_name}} memory */
 {% for j in range(1, mem_info[i]|length) %}
-#define {{ layer_name|upper }}_{{ mem_info_legends[j]|upper }} {{ mem_info[i][j] }}
+#define {{ layer_name|upper }}_MEM_{{ mem_info_legends[j]|upper }} {{ mem_info[i][j] }}
 {%- endfor %}
 {% endfor %}
 #endif /* MEM_INFO_H */
\ No newline at end of file
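To illustrate the renamed macros, here is a small sketch (not part of the commit) that renders the updated define line with jinja2 for a single layer; the "conv1" row is hypothetical sample data following MEMORY_INFO_TEMPLATE:

from jinja2 import Template

MEM_INFO_LEGENDS = ["layer_name", "size", "stride", "length", "count",
                    "cont_offset", "cont_size", "wrap_offset", "wrap_size"]
# Hypothetical row: 8 channels, 224x224, no wrapping (8*224*224 = 401408)
mem_info = [["conv1", 8, 8, 224, 224, 0, 401408, 0, 0]]

template = Template(
    "{% for j in range(1, mem_info[0]|length) %}"
    "#define {{ mem_info[0][0]|upper }}_MEM_{{ legends[j]|upper }} {{ mem_info[0][j] }}\n"
    "{% endfor %}"
)
print(template.render(mem_info=mem_info, legends=MEM_INFO_LEGENDS))
# Prints CONV1_MEM_SIZE, CONV1_MEM_STRIDE, ..., CONV1_MEM_WRAP_SIZE defines,
# each carrying the MEM_ prefix introduced by this commit

Prefixing every legend with MEM_ namespaces the generated macros, so a layer define like CONV1_MEM_SIZE cannot collide with other per-layer constants emitted elsewhere in the export headers.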