GitLab · Eclipse Projects / aidge / aidge_export_cpp

Commit 80633c31, authored 3 weeks ago by Axel Farrugia
[Refactor] Adapt export scripts to the latest aidge_quantization update
Parent: 0d3f723f
Changes: 2 changed files, with 37 additions and 36 deletions

  examples/export_LeNet/lenet.py         19 additions, 17 deletions
  examples/export_ResNet18/resnet18.py   18 additions, 19 deletions
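At a glance, the change adapts both example scripts to the renamed aidge_quantization API. A side-by-side sketch of the quantize_network() call site, assembled from the hunks below (variable values are as defined in the scripts):

    # Before this commit (old aidge_quantization API):
    aidge_quantization.quantize_network(
        network = model,
        nb_bits = NB_BITS,
        input_dataset = tensors[0:NB_CALIB],   # renamed to calibration_set
        clipping_mode = CLIPPING,
        target_type = TARGET_TYPE,
        no_quantization = NO_QUANTIZATION,     # renamed to no_quant
        optimize_signs = OPTIM_SIGN,
        single_shift = SINGLE_SHIFT,
        use_cuda = USE_CUDA,
        fold_graph = FOLD_GRAPH,
        bitshift_rounding = ROUNDING)          # parameter dropped

    # After this commit:
    aidge_quantization.quantize_network(
        network = model,
        nb_bits = NB_BITS,
        calibration_set = tensors[0:NB_CALIB],
        clipping_mode = CLIPPING,
        target_type = TARGET_TYPE,
        no_quant = NO_QUANT,
        optimize_signs = OPTIM_SIGN,
        single_shift = SINGLE_SHIFT,
        use_cuda = USE_CUDA,
        fold_graph = FOLD_GRAPH)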
examples/export_LeNet/lenet.py (+19 −17)
@@ -22,7 +22,7 @@ from aidge_export_cpp.export_utils import (
     set_nodes_datatypes,
     exclude_unwanted_producers)
-from aidge_core.export_utils import remove_optional_inputs
+from aidge_core.export_utils import remove_optional_inputs, get_node_from_metaop

 # Torch (Dataset)
 import torch
@@ -89,8 +89,7 @@ Export configuration details :
 - OPTIM_SIGN : Quantization optional optimization based on data sign.
 - SINGLE_SHIFT : Quantization option specifying if inserted scaling nodes should be
                  single shift or floating point.
-- ROUNDING : Apply rounding on the data after the single shift step.
-- NO_QUANTIZATION : Skip the quantization step.
+- NO_QUANT : Skip the quantization step.
 - CLIPPING : Clipping method during quantization.
 - FOLD_GRAPH : The quantization step adds cast nodes to cast the graph into the given TARGET_TYPE.
                Enabling the FOLD_GRAPH will automatically fold these nodes into the following
@@ -136,7 +135,7 @@ DO_EXAMPLES = True
 OPTIM_SIGN = False
 SINGLE_SHIFT = True
 ROUNDING = True
-NO_QUANTIZATION = False
+NO_QUANT = False
 CLIPPING = aidge_quantization.Clipping.MSE  # 'MAX'
 FOLD_GRAPH = True
@@ -150,12 +149,11 @@ print(' NB_TEST = ', NB_TEST)
 print(' NB_CALIB = ', NB_CALIB)
 print(' NB_BITS = ', NB_BITS)
 print(' OPTIM_SIGN = ', OPTIM_SIGN)
-print(' NO_QUANTIZATION = ', NO_QUANTIZATION)
+print(' NO_QUANT = ', NO_QUANT)
 print(' CLIPPING = ', CLIPPING)
 print(' SINGLE_SHIFT = ', SINGLE_SHIFT)
 print(' USE_CUDA = ', USE_CUDA)
 print(' DEV_MODE = ', DEV_MODE)
-print(' ROUNDING = ', ROUNDING)

 torch.manual_seed(RNG_SEED)
 random.seed(RNG_SEED)
@@ -207,11 +205,13 @@ for input, label in test_set:
 Load the .onnx model and perform some usual graph modifications :
     - Remove the flatten nodes;
     - Fuse the batchnorm nodes into the biases producers.
+    - Expand the metaOperators to perform the desired fusions.
 """

 model = aidge_onnx.load_onnx(MODEL_NAME + ".onnx", verbose=False)
 aidge_core.remove_flatten(model)
 aidge_core.fuse_batchnorm(model)
+aidge_core.expand_metaops(model)
 model.save("imported_model")

 # --------------------------------------------------------------
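Meta-operator expansion now runs right after the ONNX import instead of just before kernel fusion (see the hunk removing it further down). A minimal sketch of the updated import sequence, using only the calls shown in this diff (the file name is illustrative):

    import aidge_core
    import aidge_onnx

    model = aidge_onnx.load_onnx("lenet.onnx", verbose=False)  # illustrative file name
    aidge_core.remove_flatten(model)   # drop ONNX Flatten nodes
    aidge_core.fuse_batchnorm(model)   # fold batchnorm into the bias producers
    aidge_core.expand_metaops(model)   # break existing metaops, e.g. PaddedConv -> Pad + Conv
    model.save("imported_model")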
@@ -269,15 +269,24 @@ if quantize_model:
     aidge_quantization.quantize_network(
         network = model,
         nb_bits = NB_BITS,
-        input_dataset = tensors[0:NB_CALIB],
+        calibration_set = tensors[0:NB_CALIB],
         clipping_mode = CLIPPING,
         target_type = TARGET_TYPE,
-        no_quantization = NO_QUANTIZATION,
+        no_quant = NO_QUANT,
         optimize_signs = OPTIM_SIGN,
         single_shift = SINGLE_SHIFT,
         use_cuda = USE_CUDA,
-        fold_graph = FOLD_GRAPH,
-        bitshift_rounding = ROUNDING)
+        fold_graph = FOLD_GRAPH)
+
+    # Tag the scaling producers
+    for node in model.get_nodes():
+        if node.type() == "Quantizer":
+            for SNode in get_node_from_metaop(node, "BitShift"):
+                SNode.get_parent(1).attributes().shift_prod = True
+            for CNode in get_node_from_metaop(node, "Mul"):
+                CNode.get_parent(1).attributes().coef_prod = True
+
+    model.save("post_ptq_model")

 # --------------------------------------------------------------
 # RESCALE THE INPUT SAMPLES
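The new tagging step is duplicated verbatim in both scripts, so it could be factored into a shared helper. A minimal sketch, assuming get_node_from_metaop, get_parent and attributes behave exactly as used in the diff (the helper name tag_scaling_producers is hypothetical, not part of this commit):

    def tag_scaling_producers(model):  # hypothetical helper, not in the commit
        """Mark the shift / coefficient producers inside each Quantizer
        metaop so that later export passes can single them out."""
        for node in model.get_nodes():
            if node.type() == "Quantizer":
                # BitShift nodes: input 1 comes from the shift-amount producer
                for snode in get_node_from_metaop(node, "BitShift"):
                    snode.get_parent(1).attributes().shift_prod = True
                # Mul nodes: input 1 comes from the scaling-coefficient producer
                for cnode in get_node_from_metaop(node, "Mul"):
                    cnode.get_parent(1).attributes().coef_prod = True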
@@ -358,13 +367,6 @@ In this step, we use graph regex techniques to find the desired patterns
 within the graph in order to match the export implementation of the kernels.
 """

-# Expand meta ops
-"""
-We first need to expand the graph to break all the metaops that may already
-exist. For instance, PaddedConv will become Pad -> Conv.
-"""
-aidge_core.expand_metaops(model)
-
 # Exclude unwanted producers
 """
 Before fusing the nodes, we set a tag on the Producers in order to exclude
examples/export_ResNet18/resnet18.py (+18 −19)
@@ -27,7 +27,7 @@ from aidge_export_cpp.export_utils import (
     set_nodes_datatypes,
     normalize)
-from aidge_core.export_utils import remove_optional_inputs
+from aidge_core.export_utils import remove_optional_inputs, get_node_from_metaop

 # Torch (Dataset)
 import torch
@@ -94,8 +94,7 @@ Export configuration details :
 - OPTIM_SIGN : Quantization optional optimization based on data sign.
 - SINGLE_SHIFT : Quantization option specifying if inserted scaling nodes should be
                  single shift or floating point.
-- ROUNDING : Apply rounding on the data after the single shift step.
-- NO_QUANTIZATION : Skip the quantization step. Should be set to False.
+- NO_QUANT : Skip the quantization step. Should be set to False.
 - CLIPPING : Clipping method during quantization.
 - FOLD_GRAPH : The quantization step adds cast nodes to cast the graph into the given TARGET_TYPE.
                Enabling the FOLD_GRAPH will automatically fold these nodes into the following
@@ -161,14 +160,13 @@ def print_cfg():
     print(' NB_CALIB = ', NB_CALIB)
     print(' NB_BITS = ', NB_BITS)
     print(' OPTIM_SIGN = ', OPTIM_SIGN)
-    print(' NO_QUANTIZATION = ', NO_QUANTIZATION)
+    print(' NO_QUANT = ', NO_QUANT)
     print(' CLIPPING = ', CLIPPING)
     print(' SINGLE_SHIFT = ', SINGLE_SHIFT)
     print(' TARGET_TYPE = ', TARGET_TYPE)
     print(' FOLD_GRAPH = ', FOLD_GRAPH)
     print(' USE_CUDA = ', USE_CUDA)
     print(' DEV_MODE = ', DEV_MODE)
-    print(' ROUNDING = ', ROUNDING)

 print_cfg()
@@ -239,12 +237,14 @@ for tensor in tensors:
 Load the .onnx model and perform some usual graph modifications :
     - Remove the flatten nodes;
     - Fuse the batchnorm nodes into the biases producers.
+    - Expand the metaOperators to perform the desired fusions.
 """

 model = aidge_onnx.load_onnx(MODEL_NAME + ".onnx", verbose=False)
 model.save("imported_model")
 aidge_core.remove_flatten(model)
 aidge_core.fuse_batchnorm(model)
+aidge_core.expand_metaops(model)
 model.save("imported_model_fused_bn")

 # --------------------------------------------------------------
@@ -301,15 +301,24 @@ if quantize_model:
     aidge_quantization.quantize_network(
         network = model,
         nb_bits = NB_BITS,
-        input_dataset = aidge_tensors[0:NB_CALIB],
+        calibration_set = aidge_tensors[0:NB_CALIB],
         clipping_mode = CLIPPING,
         target_type = TARGET_TYPE,
-        no_quantization = NO_QUANTIZATION,
+        no_quant = NO_QUANT,
         optimize_signs = OPTIM_SIGN,
         single_shift = SINGLE_SHIFT,
         use_cuda = USE_CUDA,
-        fold_graph = FOLD_GRAPH,
-        bitshift_rounding = ROUNDING)
+        fold_graph = FOLD_GRAPH)
+
+    # Tag the scaling producers
+    for node in model.get_nodes():
+        if node.type() == "Quantizer":
+            for SNode in get_node_from_metaop(node, "BitShift"):
+                SNode.get_parent(1).attributes().shift_prod = True
+            for CNode in get_node_from_metaop(node, "Mul"):
+                CNode.get_parent(1).attributes().coef_prod = True
+
+    model.save("post_ptq_model")

 # --------------------------------------------------------------
 # RESCALE THE INPUT SAMPLES
@@ -382,16 +391,6 @@ In this step, we use graph regex techniques to find the desired patterns
 within the graph in order to match the export implementation of the kernels.
 """

-# Expand meta ops
-"""
-We first need to expand the graph to break all the metaops that may already
-exist. For instance, PaddedConv will become Pad -> Conv.
-"""
-aidge_core.expand_metaops(model)
-model.save("after_expand")
-
 # Exclude unwanted producers
 """
 Before fusing the nodes, we set a tag on the Producers in order to exclude