Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
aidge_core
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Iterations
Wiki
Requirements
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Locked files
Build
Pipelines
Jobs
Pipeline schedules
Test cases
Artifacts
Deploy
Releases
Model registry
Operate
Environments
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Code review analytics
Issue analytics
Insights
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Terms and privacy
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
Eclipse Projects
aidge
aidge_core
Commits
43b94a02
Commit
43b94a02
authored
8 months ago
by
Cyril Moineau
Browse files
Options
Downloads
Plain Diff
Merge branch 'dev' into AidgeExport
parents
19d735e2
af845fe7
No related branches found
Branches containing commit
No related tags found
Tags containing commit
2 merge requests
!152
Update Aidge export to take a graph view as an argument instead of a...
,
!115
Aidge export
Pipeline
#48856
passed
8 months ago
Stage: static_analysis
Stage: build
Stage: test
Stage: coverage
Changes
2
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
include/aidge/operator/Producer.hpp
+4
-3
4 additions, 3 deletions
include/aidge/operator/Producer.hpp
src/recipes/FuseBatchNorm.cpp
+51
-5
51 additions, 5 deletions
src/recipes/FuseBatchNorm.cpp
with
55 additions
and
8 deletions
include/aidge/operator/Producer.hpp
+
4
−
3
View file @
43b94a02
...
...
@@ -133,19 +133,20 @@ inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, cons
}
template
<
std
::
array
<
DimSize_t
,
1
>
::
size_type
DIM
>
void
addProducer
(
std
::
shared_ptr
<
Node
>&
otherNode
,
const
IOIndex_t
inputIdx
,
const
std
::
array
<
DimSize_t
,
DIM
>&
dims
,
const
std
::
string
&
extension
)
{
std
::
shared_ptr
<
Node
>
addProducer
(
std
::
shared_ptr
<
Node
>&
otherNode
,
const
IOIndex_t
inputIdx
,
const
std
::
array
<
DimSize_t
,
DIM
>&
dims
,
const
std
::
string
&
extension
)
{
assert
(
inputIdx
!=
gk_IODefaultIndex
);
static_assert
(
DIM
<=
MaxDim
,
"Too many tensor dimensions required by addProducer, not supported"
);
const
std
::
string
prodName
=
(
otherNode
->
name
().
empty
())
?
""
:
(
otherNode
->
name
()
+
std
::
string
(
"_"
)
+
extension
);
auto
prod
=
Producer
(
dims
,
prodName
);
prod
->
addChild
(
otherNode
,
0
,
inputIdx
);
otherNode
->
getOperator
()
->
associateInput
(
inputIdx
,
prod
->
getOperator
()
->
getRawOutput
(
0
));
return
prod
;
}
/**
 * @brief Overload of addProducer taking a C-style array for @p dims so the
 *        template dimension DIM is deduced automatically at the call site
 *        (e.g. addProducer(node, 1, {3, 3}, "w")).
 *
 * Simply converts the array with to_array and forwards to the std::array
 * overload.
 *
 * @return the newly created Producer node (see the std::array overload).
 */
template <std::size_t DIM>
std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode,
                                  const IOIndex_t inputIdx,
                                  DimSize_t const (&dims)[DIM],
                                  const std::string& extension) {
    return addProducer(otherNode, inputIdx, to_array(dims), extension);
}
}
// namespace Aidge
...
...
This diff is collapsed.
Click to expand it.
src/recipes/FuseBatchNorm.cpp
+
51
−
5
View file @
43b94a02
...
...
@@ -32,10 +32,12 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
std
::
shared_ptr
<
Aidge
::
Node
>
batchnormNode
)
{
// Case: convNode is a MetaOperator ending with a Convolution
// eg. PaddedConv
std
::
shared_ptr
<
Node
>
metaNode
;
if
(
!
(
convNode
->
getOperator
()
->
isAtomic
()))
{
const
std
::
shared_ptr
<
MetaOperator_Op
>
metaNode
=
std
::
static_pointer_cast
<
MetaOperator_Op
>
(
convNode
->
getOperator
());
const
std
::
shared_ptr
<
GraphView
>
metanodeGraph
=
metaNode
->
getMicroGraph
();
const
std
::
vector
<
std
::
pair
<
std
::
shared_ptr
<
Node
>
,
IOIndex_t
>>
outputNodes
=
metanodeGraph
->
getOrderedOutputs
();
metaNode
=
convNode
;
const
auto
metaOp
=
std
::
static_pointer_cast
<
MetaOperator_Op
>
(
convNode
->
getOperator
());
const
std
::
shared_ptr
<
GraphView
>
metaOpGraph
=
metaOp
->
getMicroGraph
();
const
std
::
vector
<
std
::
pair
<
std
::
shared_ptr
<
Node
>
,
IOIndex_t
>>
outputNodes
=
metaOpGraph
->
getOrderedOutputs
();
if
(
outputNodes
.
size
()
!=
1
)
{
AIDGE_THROW_OR_ABORT
(
std
::
runtime_error
,
"Bad MetaOperator argument for fuseBatchNorm recipie."
);
}
...
...
@@ -68,6 +70,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
convNbOutChannels
=
convOpPtr
->
nbChannels
();
kernelDims
=
convOpPtr
->
getAttr
<
std
::
array
<
DimSize_t
,
2
>>
(
"KernelDims"
);
}
AIDGE_ASSERT
(
kernelDims
.
size
()
==
2
,
"fuseBatchNorm(): only 2D convolutions are supported"
);
std
::
shared_ptr
<
Tensor
>
scaleBuf
,
shiftBuf
,
b_meanBuf
,
b_varBuf
;
const
Tensor
&
scale
=
batchOp
->
getInput
(
1
)
->
refCastFrom
(
scaleBuf
,
DataType
::
Float32
,
"cpu"
);
...
...
@@ -98,6 +101,51 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
fmt
::
print
(
"Warning: variance < 1e-12 for all outputs! Is the network correctly trained?
\n
"
);
}
// Add bias if it is non-existent, as there will be a bias after the fuse
if
(
!
convOp
->
getInput
(
2
))
{
if
(
metaNode
)
{
// Conv is inside a meta-operator, we add bias outside it
// Find the correct input index of the meta-operator corresponding
// to the bias:
const
auto
metaOp
=
std
::
static_pointer_cast
<
MetaOperator_Op
>
(
metaNode
->
getOperator
());
const
auto
metaOpGraph
=
metaOp
->
getMicroGraph
();
IOIndex_t
inputIdx
=
0
;
for
(
auto
input
:
metaOpGraph
->
getOrderedInputs
())
{
if
(
input
.
first
==
convNode
&&
input
.
second
==
2
)
{
break
;
}
++
inputIdx
;
}
auto
prod
=
addProducer
(
metaNode
,
inputIdx
,
{
convNbOutChannels
},
"b"
);
// Add the new bias node to the same views as the meta node
for
(
auto
g
:
metaNode
->
views
())
{
g
->
add
(
prod
);
}
}
else
{
auto
prod
=
addProducer
(
convNode
,
2
,
{
convNbOutChannels
},
"b"
);
if
(
convNode
->
input
(
1
).
first
)
{
// Add the new bias node to the same views as the weights node
// if possible
for
(
auto
g
:
convNode
->
input
(
1
).
first
->
views
())
{
g
->
add
(
prod
);
}
}
else
{
for
(
auto
g
:
convNode
->
views
())
{
g
->
add
(
prod
);
}
}
}
AIDGE_INTERNAL_ASSERT
(
convOp
->
getInput
(
2
)
!=
nullptr
);
// Use the same backend for the bias than for the weights
convOp
->
getInput
(
2
)
->
setBackend
(
convOp
->
getInput
(
1
)
->
backend
());
convOp
->
getInput
(
2
)
->
zeros
();
}
std
::
shared_ptr
<
Tensor
>
weightBuf
,
biasBuf
;
Tensor
&
weight
=
convOp
->
getInput
(
1
)
->
refCastFrom
(
weightBuf
,
DataType
::
Float32
,
"cpu"
);
Tensor
&
bias
=
convOp
->
getInput
(
2
)
->
refCastFrom
(
biasBuf
,
DataType
::
Float32
,
"cpu"
);
...
...
@@ -112,7 +160,6 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
?
b_var
.
get
<
float
>
(
outChId
)
:
meanVariance
));
// Weights adjustments
for
(
std
::
size_t
channel
=
0
;
channel
<
channelsSize
;
++
channel
)
{
// TODO : Suppose kerneldims = 2
for
(
std
::
size_t
k0
=
0
;
k0
<
kernelDims
[
0
];
++
k0
)
{
for
(
std
::
size_t
k1
=
0
;
k1
<
kernelDims
[
1
];
++
k1
)
{
std
::
vector
<
DimSize_t
>
currentIdx
=
{
outChId
,
channel
,
k0
,
k1
};
...
...
@@ -122,7 +169,6 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
}
}
// TODO : check if noBias==true is set, then set biasValue to 0
float
biasValue
=
bias
.
get
<
float
>
(
outChId
);
biasValue
=
shift
.
get
<
float
>
(
outChId
)
+
(
biasValue
-
b_mean
.
get
<
float
>
(
outChId
))
*
factor
;
...
...
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment