Commit 092cb4bd authored by Cyril Moineau

Adapt aidge mem_info to new API

parent c7d9e41e
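Reading the diff below: this commit adapts generate_optimized_memory_info to the new mem_info API. mem_info changes from a flat list of per-layer dicts (each carrying a "layer_name" key) to a dict mapping every scheduled node to a list of memory descriptors, one per output. Producer nodes, previously skipped with a bare continue, now receive an empty list, and the generic case reads one memory plane per output via get_nb_outputs() instead of assuming a single output plane. Roughly (shapes inferred from the diff, not from API documentation):

    # before: one flat entry per layer
    mem_info = [{"layer_name": name, "size": ..., "offset": ..., ...}, ...]
    # after: one list of per-output descriptors per node
    mem_info = {node: [{"size": ..., "offset": ..., ...}, ...], ...}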
@@ -70,14 +70,14 @@ def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder
     # In the export, we currently use an unified memory buffer whose size
     # is determined by the memory peak usage
     mem_size = mem_manager.get_peak_usage()
-    mem_info = []
+    mem_info = {}
     mem_planes = mem_manager.get_planes()
     for node in scheduler.get_static_scheduling():
         if node.type() == "Producer":
-            continue # Skipping memory management for producers
-        if node in nodes_at_input:
+            mem_info[node] = [] # No meminfo for producer
+        elif node in nodes_at_input:
             # Input memory management (suppose tensor ends with [:, channel, height, width]))
             tensor = node.get_operator().get_output(0)
             if tensor is None:
@@ -86,47 +86,49 @@ def generate_optimized_memory_info(scheduler: aidge_core.Scheduler, stats_folder
                 raise RuntimeError(
                     f"Input producer dimensions must be with [:, channel, height, width] but got {tensor.dims()} instead")
-            name = node.name()
-            offset = 0 # Suppose input data is stored outside the export function
-                       # so the memory offset is not important to consider
-            # TODO : use get_chan get_height and get_width function !
-            size = tensor.dims()[-3] # Should be nb_channels
-            stride = tensor.dims()[-3] # Should be nb_channels
-            length = tensor.dims()[-1] # Should be width
-            count = tensor.dims()[-2] # Should be height
-            cont_offset = 0 # Suppose input data is stored outside the export function
-                            # so the memory offset is not important to consider
-            # Size of input
-            cont_size = tensor.dims()[-1] * \
-                tensor.dims()[-2] * tensor.dims()[-3]
-            wrap_offset = 0 # No wrapping
-            wrap_size = 0 # No wrapping
+            node_mem_info.append({
+                "size": tensor.dims()[-3], # Should be nb_channels
+                "offset": 0, # Suppose input data is stored outside the export function
+                # so the memory offset is not important to consider
+                "stride": tensor.dims()[-3], # Should be nb_channels
+                "length": tensor.dims()[-1], # Should be width
+                "count": tensor.dims()[-2], # Should be height
+                "cont_offset": 0, # Suppose input data is stored outside the export function
+                # so the memory offset is not important to consider
+                "cont_size": tensor.dims()[-1] * \
+                    tensor.dims()[-2] * \
+                    tensor.dims()[-3], # Size of input
+                "wrap_offset": 0, # No wrapping
+                "wrap_size": 0 # No wrapping
+            })
+            mem_info[node] = [{
+                "size": plane.size,
+                "offset": plane.offset,
+                "stride": plane.stride,
+                "length": plane.length,
+                "count": plane.count,
+                "cont_offset": plane.get_contiguous_offset(),
+                "cont_size": plane.get_contiguous_size(),
+                "wrap_offset": plane.get_wrapped_offset(),
+                "wrap_size": plane.get_wrapped_size()
+            }]
         else:
-            plane = mem_planes[node][0]
-            name = node.name()
-            offset = plane.offset
-            size = plane.size
-            stride = plane.stride
-            length = plane.length
-            count = plane.count
-            cont_offset = plane.get_contiguous_offset()
-            cont_size = plane.get_contiguous_size()
-            wrap_offset = plane.get_wrapped_offset()
-            wrap_size = plane.get_wrapped_size()
-        mem_info.append({
-            "layer_name": name,
-            "size": size,
-            "offset": offset,
-            "stride": stride,
-            "length": length,
-            "count": count,
-            "cont_offset": cont_offset,
-            "cont_size": cont_size,
-            "wrap_offset": wrap_offset,
-            "wrap_size": wrap_size
-        })
+            node_mem_info = []
+            for out_id in range(node.get_nb_outputs()):
+                plane = mem_planes[node][out_id]
+                node_mem_info.append({
+                    "size": plane.size,
+                    "offset": plane.offset,
+                    "stride": plane.stride,
+                    "length": plane.length,
+                    "count": plane.count,
+                    "cont_offset": plane.get_contiguous_offset(),
+                    "cont_size": plane.get_contiguous_size(),
+                    "wrap_offset": plane.get_wrapped_offset(),
+                    "wrap_size": plane.get_wrapped_size()
+                })
+            mem_info[node] = node_mem_info
     return mem_size, mem_info
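Below is a minimal sketch of how an export backend might consume the structure this commit introduces. FakeNode and render_memory_table are hypothetical stand-ins written for illustration, not part of the aidge_core API; only the {node: [plane_dict, ...]} shape and the descriptor keys ("size", "offset", "stride", "length", "count", ...) come from the diff above.

    from typing import Dict, List

    class FakeNode:
        """Stand-in for aidge_core.Node: keeps just a name and a type."""
        def __init__(self, name: str, node_type: str):
            self._name = name
            self._type = node_type

        def name(self) -> str:
            return self._name

        def type(self) -> str:
            return self._type

    def render_memory_table(mem_size: int,
                            mem_info: Dict[FakeNode, List[dict]]) -> str:
        """Format the memory layout, one line per (node, output) pair."""
        lines = [f"peak usage: {mem_size} bytes"]
        for node, planes in mem_info.items():
            if not planes:
                continue  # Producers carry an empty list in the new API
            for out_id, plane in enumerate(planes):
                lines.append(
                    f"{node.name()}[out {out_id}]: offset={plane['offset']} "
                    f"size={plane['size']} stride={plane['stride']} "
                    f"length={plane['length']} count={plane['count']}")
        return "\n".join(lines)

    weights = FakeNode("weights", "Producer")
    conv = FakeNode("conv1", "Conv")
    mem_info = {
        weights: [],  # no meminfo for producers, as in the diff
        conv: [{"size": 16, "offset": 0, "stride": 16, "length": 32,
                "count": 32, "cont_offset": 0, "cont_size": 16384,
                "wrap_offset": 0, "wrap_size": 0}],
    }
    print(render_memory_table(16384, mem_info))

Since the dict is filled while iterating scheduler.get_static_scheduling() and Python dicts preserve insertion order, walking mem_info visits nodes in static-scheduling order, which is what an export template typically needs.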