Commit 1c773b62, authored 8 months ago by Cyril Moineau
Add meminfo to ExportNode.
Parent: 80ca13c8
No related branches, tags, or merge requests found.
Showing 3 changed files with 200 additions and 12 deletions:

aidge_core/export_utils/export_registry.py   (+1, −1)
aidge_core/export_utils/node_export.py       (+66, −11)
aidge_core/mem_info.py                       (+133, −0)
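For orientation, a hedged sketch of how these pieces are meant to fit together after this commit: the scheduler produces per-node memory information, which is then handed to each ExportNode at construction time. Names that are not visible in this diff (the registry lookup get_export_node and the export_lib variable) are assumptions:

    from aidge_core.mem_info import compute_default_mem_info  # new module added by this commit

    mem_size, mem_info = compute_default_mem_info(scheduler)  # {node: [{"size": ..., "offset": ...}, ...]}
    for node in scheduler.get_static_scheduling():
        if node.type() == "Producer":
            continue  # Producers carry no meminfo (see node_export.py below)
        export_node_cls = export_lib.get_export_node(node)   # hypothetical registry lookup name
        export_node = export_node_cls(node, mem_info[node])  # ExportNode.__init__ now takes mem_info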
aidge_core/export_utils/export_registry.py  (+1, −1)
@@ -56,7 +56,7 @@ class ExportLib(): # Should be abstract ?
         if len(cls._export_node_registry[node.type()]) != 1:
             raise RuntimeError("ExportLib registry doesn't support when multiple export node are available yet ...")
         else:
-            return cls._export_node_registry[node.type()][0](node)
+            return cls._export_node_registry[node.type()][0]

     @classmethod
     def add_export_node(cls, key: str, eNode: ExportNode) -> None:
         if key not in cls._export_node_registry:
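The only functional change here is that the registry now returns the registered ExportNode subclass itself rather than an instance built from the node alone, leaving it to the caller to instantiate it with the new mem_info argument. A minimal sketch of the adjusted call site (the enclosing classmethod's name is not shown in this hunk, so get_export_node is an assumption, as is mem_info_for_node):

    export_node_cls = ExportLib.get_export_node(node)        # now returns a class, not an instance
    export_node = export_node_cls(node, mem_info_for_node)   # caller supplies the per-output mem_info list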
aidge_core/export_utils/node_export.py  (+66, −11)
@@ -3,6 +3,8 @@ from pathlib import Path
 from aidge_core.export_utils import data_conversion, code_generation
 from abc import ABC, abstractmethod
+from typing import List
+

 def get_chan(tensor: aidge_core.Tensor) -> int:
     """
     Given a tensor return the number of channel
@@ -10,9 +12,9 @@ def get_chan(tensor: aidge_core.Tensor) -> int:
     dformat = tensor.dformat()
     dims = tensor.dims()
     if dformat == aidge_core.dformat.Default:
-        if len(dims) == 4: # Suppose NCHW
+        if len(dims) == 4: # Suppose NCHW
             return dims[1]
-        elif len(dims) == 2: # Suppose NC
+        elif len(dims) == 2: # Suppose NC
             return dims[1]
         else:
             return None
@@ -36,9 +38,9 @@ def get_height(tensor: aidge_core.Tensor) -> int:
     dformat = tensor.dformat()
     dims = tensor.dims()
     if dformat == aidge_core.dformat.Default:
-        if len(dims) == 4: # Suppose NCHW
+        if len(dims) == 4: # Suppose NCHW
             return dims[2]
-        elif len(dims) == 2: # Suppose NC
+        elif len(dims) == 2: # Suppose NC
             return 1
         else:
             return None
@@ -62,9 +64,9 @@ def get_width(tensor: aidge_core.Tensor) -> int:
     dformat = tensor.dformat()
     dims = tensor.dims()
     if dformat == aidge_core.dformat.Default:
-        if len(dims) == 4: # Suppose NCHW
+        if len(dims) == 4: # Suppose NCHW
             return dims[3]
-        elif len(dims) == 2: # Suppose NC
+        elif len(dims) == 2: # Suppose NC
             return 1
         else:
             return None
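These three helpers read the channel, height and width positions straight out of dims() for the default data format: indices 1, 2 and 3 for a 4D (NCHW) tensor, and channel at index 1 with height and width of 1 for a 2D (NC) tensor. A standalone illustration of that indexing, in plain Python rather than through the aidge_core API:

    def chw_from_dims(dims):
        """Mirror of the get_chan/get_height/get_width logic for the default data format."""
        if len(dims) == 4:    # Suppose NCHW
            return dims[1], dims[2], dims[3]
        elif len(dims) == 2:  # Suppose NC
            return dims[1], 1, 1
        return None, None, None

    print(chw_from_dims([1, 3, 224, 224]))  # -> (3, 224, 224)
    print(chw_from_dims([8, 10]))           # -> (10, 1, 1)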
@@ -111,7 +113,7 @@ class ExportNode(ABC):
     """
     @abstractmethod
-    def __init__(self, aidge_node: aidge_core.Node) -> None:
+    def __init__(self, aidge_node: aidge_core.Node, mem_info: List[dict]) -> None:
         """
         Create ExportNode and retieve attriubtes from ``aidge_node``:
         """
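The new mem_info parameter is a list with one dictionary per output of the node. Based on the checks added further down in this diff, "size" and "offset" are mandatory while the remaining keys have fallbacks. An illustrative entry for a single-output node (values made up; MyExportNode is a hypothetical subclass):

    mem_info = [{
        "size": 25088,    # mandatory
        "offset": 0,      # mandatory
        "stride": 25088,  # optional, defaults to "size"
        "length": 224,    # optional, defaults to tensor.size()
        # "cont_size" defaults to tensor.size(); "cont_offset", "wrap_offset", "wrap_size" default to 0
    }]
    export_node = MyExportNode(aidge_node, mem_info)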
@@ -150,6 +152,20 @@ class ExportNode(ABC):
         self.attributes["out_height"] = [None] * self.attributes["nb_out"]
         self.attributes["out_width"] = [None] * self.attributes["nb_out"]
+        # Producer don't have meminfo
+        # TODO: document this attribute
+        # true if node have meminfo else false
+        self.attributes["meminfo"] = self.node.type() != "Producer"
+        if self.attributes["meminfo"]:
+            self.attributes["mem_info_size"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_offset"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_stride"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_length"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_cont_size"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_cont_offset"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_wrap_offset"] = [None] * self.attributes["nb_out"]
+            self.attributes["mem_info_wrap_size"] = [None] * self.attributes["nb_out"]

         for idx, parent_node_in_id in enumerate(self.node.inputs()):
             parent_node, out_id = parent_node_in_id
             self.inputs.append(parent_node)
@@ -167,7 +183,8 @@ class ExportNode(ABC):
             else:
                 print(f"No input for {self.node.name()}")
         for idx, list_child_node_in_id in enumerate(self.node.outputs()):
-            self.outputs += [node_in_id[0] for node_in_id in list_child_node_in_id]
+            self.outputs += [node_in_id[0] for node_in_id in list_child_node_in_id]
             if self.operator.get_output(idx) is not None:
                 tensor = self.operator.get_output(idx)
                 self.attributes["out_name"][idx] = f"{self.attributes['name']}_output_{idx}"
@@ -179,11 +196,47 @@ class ExportNode(ABC):
                 self.attributes["out_chan"][idx] = get_chan(tensor)
                 self.attributes["out_height"][idx] = get_height(tensor)
                 self.attributes["out_width"][idx] = get_width(tensor)
+                # Output meminfo
+                # TODO: add to docstring
+                if self.attributes["meminfo"]:
+                    if "size" in mem_info[idx]:
+                        self.attributes["mem_info_size"][idx] = mem_info[idx]["size"]
+                    else:
+                        raise RuntimeError("Size is mandatory")
+                    if "offset" in mem_info[idx]:
+                        self.attributes["mem_info_offset"][idx] = mem_info[idx]["offset"]
+                    else:
+                        raise RuntimeError("Offset is mandatory")
+                    if "stride" in mem_info[idx]:
+                        self.attributes["mem_info_stride"][idx] = mem_info[idx]["stride"]
+                    else:
+                        self.attributes["mem_info_stride"][idx] = mem_info[idx]["size"]
+                    if "length" in mem_info[idx]:
+                        self.attributes["mem_info_length"][idx] = mem_info[idx]["length"]
+                    else:
+                        self.attributes["mem_info_length"][idx] = tensor.size()
+                    if "cont_size" in mem_info[idx]:
+                        self.attributes["mem_info_cont_size"][idx] = mem_info[idx]["cont_size"]
+                    else:
+                        self.attributes["mem_info_cont_size"][idx] = tensor.size()
+                    if "cont_offset" in mem_info[idx]:
+                        self.attributes["mem_info_cont_offset"][idx] = mem_info[idx]["cont_offset"]
+                    else:
+                        self.attributes["mem_info_cont_offset"][idx] = 0
+                    if "cont_offset" in mem_info[idx]:
+                        self.attributes["mem_info_wrap_offset"][idx] = mem_info[idx]["wrap_offset"]
+                    else:
+                        self.attributes["mem_info_wrap_offset"][idx] = 0
+                    if "wrap_size" in mem_info[idx]:
+                        self.attributes["mem_info_wrap_size"][idx] = mem_info[idx]["wrap_size"]
+                    else:
+                        self.attributes["mem_info_wrap_size"][idx] = 0
             else:
                 print(f"No output for {self.node.name()}")

     @classmethod
     @abstractmethod
-    def exportable(cls, node: aidge_core.Node) -> bool:
+    def exportable(cls, node: aidge_core.Node) -> bool:
         """
         Given a :py:class:`aidge_core.Node` return if the node can be exported or not.

         :param node: Node to test the exportability
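As written above, the wrapped-buffer fields are only filled in when the corresponding keys are supplied (the "wrap_offset" value is read when a "cont_offset" key is present), so a mem_info entry describing a wrap-around buffer would carry the continuous and wrapped fields together. An illustrative entry with made-up values:

    mem_info_entry = {
        "size": 4096,        # mandatory
        "offset": 1024,      # mandatory
        "stride": 4096,
        "length": 64,
        "cont_size": 3072,   # contiguous part of the buffer
        "cont_offset": 1024,
        "wrap_offset": 0,    # where the wrapped part starts
        "wrap_size": 1024,
    }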
@@ -193,6 +246,7 @@ class ExportNode(ABC):
         """
         pass


 class ExportNodeCpp(ExportNode):
     # Path to the template defining how to export the node definition
     config_template: str = None
@@ -229,8 +283,9 @@ class ExportNodeCpp(ExportNode):
                 kernel_path,
                 str(export_folder / "include" / self.kernels_path))
-            kernel_include_list.append(self.kernels_path + "/" + kernel_path.stem + kernel_path.suffix)
-        path_to_definition = f"layers/{self.attributes['name']}.h"
+            kernel_include_list.append(self.kernels_path + "/" + kernel_path.stem + kernel_path.suffix)
+
+        path_to_definition = f"layers/{self.attributes['name']}.h"
         code_generation.generate_file(
             str(export_folder / path_to_definition),
             self.config_template,
aidge_core/mem_info.py  (new file, 0 → 100644, +133, −0)
import os
import shutil
from pathlib import Path
import aidge_core
from typing import Tuple, List


# Default memory management, which can be used for development
def compute_default_mem_info(scheduler: aidge_core.Scheduler) -> Tuple[int, List]:
    """Basic memory management concatenate memory block, no memory reuse !

    :param scheduler: Aidge scheduler
    :type scheduler: :py:class:`aidge_core.Scheduler`
    :return: The total memory size (in number of elements) and a list (of size nb node) of list (of size nb output) of dictionnary (size, offset)
    :rtype: Tuple[int, list]
    """
    mem_info = {}
    mem_size = 0

    # Exclude Producers and the last layers (because the results are stored outside the export)
    for i, node in enumerate(scheduler.get_static_scheduling()):
        if node.type() != "Producer":
            node_mem_info = []
            for out_id in range(node.get_nb_outputs()):
                dims = node.get_operator().get_output(out_id).dims()
                mem = 1
                for dim in dims:
                    mem *= dim
                # Add memeory info
                node_mem_info.append({
                    "size": mem,
                    "offset": mem_size
                })
                # Increment offset for the next layer
                mem_size += mem
            print(f"Adding meminfo to {node.name()}")
            mem_info[node] = node_mem_info
        else:
            mem_info[node] = []  # No meminfo for producer
    return mem_size, mem_info
def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder: Path, wrapping: bool = False) -> Tuple[int, List[dict]]:
    # The forward dims has to done outside the function
    # Also supposed the generation of the scheduler has been performed outside
    # Otherwise decomment the following line
    # scheduler.generate_scheduling()
    # Generate the memory manager
    # So far, the Producers are not take in consideration in the meory manager => inc_producers=False
    mem_manager = scheduler.generate_memory(inc_producers=False, wrap_around_buffer=wrapping)

    # List of nodes which are connected at the input of the graph (None if input is not connected)
    nodes_at_input = [n[0] for n in scheduler.graph_view().inputs()]
    # Use gnuplot to generate the log
    try:
        os.makedirs(str(stats_folder / "graph"), exist_ok=True)
        mem_manager.log("memory_info")
        os.chmod("memory_info_plot.gnu", 0o777)
        os.system("./memory_info_plot.gnu")
        shutil.move("memory_info", str(stats_folder / "graph" / "memory_info"))
        shutil.move("memory_info_plot.png", str(stats_folder / "graph" / "memory_info_plot.png"))
        os.remove("memory_info_plot.gnu")
    except:
        print("Please install gnuplot if you want memory plot from MemoryManager.")

    # In the export, we currently use an unified memory buffer whose size
    # is determined by the memory peak usage
    mem_size = mem_manager.get_peak_usage()
    mem_info = []

    mem_planes = mem_manager.get_planes()

    for node in scheduler.get_static_scheduling():
        if node.type() == "Producer":
            continue  # Skipping memory management for producers
        if node in nodes_at_input:
            # Input memory management (suppose tensor ends with [:, channel, height, width]))
            tensor = node.get_operator().get_output(0)
            if tensor is None:
                raise RuntimeError("Warning input producer not provided")
            if len(tensor.dims()) < 3:
                raise RuntimeError(f"Input producer dimensions must be with [:, channel, height, width] but got {tensor.dims()} instead")

            name = node.name()
            # Suppose input data is stored outside the export function
            # so the memory offset is not important to consider
            offset = 0
            # TODO : use get_chan get_height and get_width function !
            size = tensor.dims()[-3]    # Should be nb_channels
            stride = tensor.dims()[-3]  # Should be nb_channels
            length = tensor.dims()[-1]  # Should be width
            count = tensor.dims()[-2]   # Should be height
            # Suppose input data is stored outside the export function
            # so the memory offset is not important to consider
            cont_offset = 0
            # Size of input
            cont_size = tensor.dims()[-1] * \
                tensor.dims()[-2] * tensor.dims()[-3]
            wrap_offset = 0  # No wrapping
            wrap_size = 0    # No wrapping
        else:
            plane = mem_planes[node][0]
            name = node.name()
            offset = plane.offset
            size = plane.size
            stride = plane.stride
            length = plane.length
            count = plane.count
            cont_offset = plane.get_contiguous_offset()
            cont_size = plane.get_contiguous_size()
            wrap_offset = plane.get_wrapped_offset()
            wrap_size = plane.get_wrapped_size()
        mem_info.append({
            "layer_name": name,
            "size": size,
            "offset": offset,
            "stride": stride,
            "length": length,
            "count": count,
            "cont_offset": cont_offset,
            "cont_size": cont_size,
            "wrap_offset": wrap_offset,
            "wrap_size": wrap_size
        })
    return mem_size, mem_info
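A short usage sketch for the two helpers above. It assumes the module is importable as aidge_core.mem_info, that forward dimensions have already been propagated, and that the scheduling has been generated, as the comments in generate_optimized_memory_info require; the stats folder path is illustrative:

    from pathlib import Path
    import aidge_core
    from aidge_core.mem_info import compute_default_mem_info, generate_optimized_memory_info

    scheduler = aidge_core.SequentialScheduler(graph_view)  # graph_view with dims already forwarded
    scheduler.generate_scheduling()

    # Simple concatenated layout, no memory reuse:
    mem_size, mem_info = compute_default_mem_info(scheduler)

    # MemoryManager-based layout, optionally with wrap-around buffers:
    mem_size, mem_info = generate_optimized_memory_info(scheduler, Path("export_stats"), wrapping=True)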