Commit d4d09c91, authored 4 months ago by Jerome Hue
chore: Clean and improve the Leaky MetaOperator test
Parent: e3662e43
Pipeline #67404: canceled, 4 months ago (stages: static_analysis, build, test, coverage)
Showing 1 changed file: unit_tests/operator/Test_MetaOperator.cpp (+55 additions, −111 deletions)
@@ -750,155 +750,99 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
     std::random_device rd;
     std::mt19937 gen(rd());
-    std::uniform_real_distribution<float> valueDist(
-        0.1f,
-        1.1f); // Random float distribution between 0 and 1
-    std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2),
-                                                           std::size_t(4));
-    std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(3),
-                                                          std::size_t(3));
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f);
+    std::uniform_int_distribution<std::size_t> dimSizeDist(2, 4);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(3, 3); // fixed to 3.
     std::uniform_int_distribution<int> boolDist(0, 1);
     std::uniform_real_distribution<float> betaDist(0, 1);
     std::uniform_real_distribution<float> thresholDist(0.1, 3);
-    const std::size_t nbDims = nbDimsDist(gen);
-    Log::info("Nbdims : {}", nbDims);
-    std::vector<std::size_t> dims;
-    for (std::size_t i = 0; i < nbDims; ++i) {
-        dims.push_back(dimSizeDist(gen));
-    }
-    Log::info("timesteps : {}", dims[0]);
-    Log::info("dimensions : ");
-    for (auto dim : dims) {
-        Log::info("{}", dim);
-    }
-    const auto nbTimeSteps = dims[0];
     const auto beta = betaDist(gen);
+    const auto threshold = thresholDist(gen);
+    const auto nbDims = nbDimsDist(gen);
+    std::vector<std::size_t> dims(nbDims);
+    std::generate(dims.begin(), dims.end(), [&]() { return dimSizeDist(gen); });
+    const auto nbTimeSteps = dims[0];
-    auto myLeaky = Leaky(nbTimeSteps, beta, 1.0, LeakyReset::Subtraction, "leaky");
-    auto op = std::static_pointer_cast<MetaOperator_Op>(myLeaky->getOperator());
-    // auto stack = Stack(2);
-    auto mem_rec = Stack(nbTimeSteps, "mem_rec");
-    auto spk_rec = Stack(nbTimeSteps, "spk_rec");
-    auto pop = Pop("popinput");
+    auto myLeaky = Leaky(nbTimeSteps, beta, threshold, LeakyReset::Subtraction, "leaky");
+    auto op = std::static_pointer_cast<MetaOperator_Op>(myLeaky->getOperator());
+    auto memoryRecord = Stack(nbTimeSteps, "mem_rec");
+    auto spikeRecord = Stack(nbTimeSteps, "spk_rec");
+    auto pop = Pop("input");
-    // Here we test LSTM as it is was flatten in the graph.
-    // We just borrow its micro-graph into our larger myGraph graph.
-    auto myGraph = std::make_shared<GraphView>();
-    pop->addChild(op->getMicroGraph()->getOrderedInputs()[0].first, 0, 0);
-    // 0 for mem 1 for stack
-    op->getMicroGraph()->getOrderedOutputs()[1].first->addChild(mem_rec, 0, 0);
-    op->getMicroGraph()->getOrderedOutputs()[0].first->addChild(spk_rec, 0, 0);
-    for (auto node : op->getMicroGraph()->getOrderedOutputs()) {
-        Log::info("name of output {}", node.first->name());
-    }
-    myGraph->add(pop);
+    auto myGraph = std::make_shared<GraphView>();
+    auto leakyOutputs = op->getMicroGraph()->getOrderedOutputs();
+    auto leakyInputs = op->getMicroGraph()->getOrderedInputs();
+    pop->addChild(leakyInputs[0].first, 0, 0);
+    leakyOutputs[1].first->addChild(memoryRecord, 0, 0);
+    leakyOutputs[0].first->addChild(spikeRecord, 0, 0);
     myGraph->add(op->getMicroGraph());
-    myGraph->add(mem_rec);
-    myGraph->add(spk_rec);
-    myGraph->save("mg", true, true);
+    myGraph->add({pop, memoryRecord, spikeRecord});
     // 3 outputs
     REQUIRE(myLeaky->nbInputs() == 3);
     REQUIRE(myLeaky->inputCategory(0) == InputCategory::Data);
     // Two spikes connected to nothing, + the Add node real output
     REQUIRE(myLeaky->nbOutputs() == 4);
-    std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(
-        Array3D<float, 2, 3, 2>{{{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}},
-                                 {{2.0, 3.0}, {4.0, 5.0}, {6.0, 7.0}}}});
-    // std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(
-    //     Array3D<float, 2, 3, 2>{{{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}},
-    //                              {{2.0, 3.0}, {4.0, 5.0}, {6.0, 7.0}}}});
-    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
-    T0->setDataType(DataType::Float32);
-    T0->setBackend("cpu");
-    std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>();
-    expectedOutput->setDataType(DataType::Float32);
-    expectedOutput->setBackend("cpu");
+    // Generate input
     const auto nb_elements = std::accumulate(dims.cbegin(),
                                              dims.cend(),
                                              std::size_t(1),
                                              std::multiplies<std::size_t>());
-    float *input = new float[nb_elements];
-    float *result = new float[nb_elements];
-    for (std::size_t i = 0; i < nb_elements; ++i) {
-        input[i] = valueDist(gen);
-    }
-    T0->resize(dims);
-    T0->getImpl()->setRawPtr(input, nb_elements);
-    T0->print();
-    // Elements popped at each time step
-    auto nbElementsPerTimeStep = nb_elements / dims[0];
-    // Compute the expected result using ad-hoc implementation
-    // Init
-    for (int i = 0; i < nbElementsPerTimeStep; ++i) {
-        result[i] = input[i];
-    }
-    // Reccurence
-    for (int i = 1; i < dims[0]; ++i) {
-        auto offset = nbElementsPerTimeStep * i;
-        auto prev = nbElementsPerTimeStep * (i - 1);
-        for (int j = 0; j < nbElementsPerTimeStep; ++j) {
-            auto reset = (result[prev + j] > 1.0 ? 1 : 0);
-            result[offset + j] =
-                result[prev + j] * beta + input[offset + j] - reset;
-        }
-    }
+    const auto nbElementsPerTimeStep = nb_elements / dims[0];
+    auto *input = new float[nb_elements];
+    std::generate_n(input, nb_elements, [&]() { return valueDist(gen); });
+    auto *result = new float[nb_elements];
+    std::copy(input, input + nbElementsPerTimeStep, result);
+    // Recurrence calculation for each timestep
+    for (int timestep = 1; timestep < nbTimeSteps; ++timestep) {
+        const auto currentOffset = nbElementsPerTimeStep * timestep;
+        const auto previousOffset = nbElementsPerTimeStep * (timestep - 1);
+        for (int element = 0; element < nbElementsPerTimeStep; ++element) {
+            const auto previousValue = result[previousOffset + element];
+            const auto resetValue = (previousValue > threshold) ? threshold : 0;
+            result[currentOffset + element] =
+                previousValue * beta + input[currentOffset + element] - resetValue;
+        }
+    }
+    auto expectedOutput = std::make_shared<Tensor>(DataType::Float32);
+    expectedOutput->setBackend("cpu");
+    expectedOutput->resize(dims);
+    expectedOutput->getImpl()->setRawPtr(result, nb_elements);
     Log::info("Expected ouptut : ");
     expectedOutput->print();
-    std::shared_ptr<Tensor> myInit = std::make_shared<Tensor>(
-        Array2D<float, 3, 3>{{{0.0, 0.0, 0.0},
-                              {0.0, 0.0, 0.0},
-                              {0.0, 0.0, 0.0}}});
-    auto initMemdims = std::vector<std::size_t>(dims.begin() + 1, dims.end());
-    Log::info("dimensions : ");
-    for (auto dim : initMemdims) {
-        Log::info("{}", dim);
-    }
-    std::shared_ptr<Tensor> myInitW = std::make_shared<Tensor>(
-        Array2D<float, 3, 2>{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}});
-    std::shared_ptr<Tensor> myInitR = std::make_shared<Tensor>(initMemdims);
-    myInitR->setDataType(DataType::Float32);
-    myInitR->setBackend("cpu");
-    uniformFiller<float>(myInitR, 0, 0);
+    // Compute the real result using our operator implemenation
+    auto inputTensor = std::make_shared<Tensor>(DataType::Float32);
+    inputTensor->setBackend("cpu");
+    inputTensor->resize(dims);
+    inputTensor->getImpl()->setRawPtr(input, nb_elements);
+    auto memoryInit = std::make_shared<Tensor>(DataType::Float32);
+    memoryInit->setBackend("cpu");
+    memoryInit->resize(std::vector<std::size_t>(dims.begin() + 1, dims.end()));
+    memoryInit->zeros();
-    pop->getOperator()->associateInput(0, T0);
-    op->associateInput(1, myInitR);
-    op->associateInput(2, myInitR);
+    pop->getOperator()->associateInput(0, inputTensor);
+    op->associateInput(1, memoryInit);
+    op->associateInput(2, memoryInit);
     myGraph->compile("cpu", DataType::Float32);
     auto scheduler = SequentialScheduler(myGraph);
     REQUIRE_NOTHROW(scheduler.generateScheduling());
     REQUIRE_NOTHROW(scheduler.forward(true));
-    auto memOp =
-        std::static_pointer_cast<OperatorTensor>(spk_rec->getOperator());
+    // Compare expected output with actual output
+    auto memOp =
+        std::static_pointer_cast<OperatorTensor>(spikeRecord->getOperator());
     REQUIRE(approxEq<float>(*(memOp->getOutput(0)), *(expectedOutput)));
     }
 }
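For reference, the expected output that the updated test computes is the usual leaky integrate-and-fire recurrence with subtraction reset: mem[t] = mem[t-1] * beta + in[t] - (mem[t-1] > threshold ? threshold : 0), with mem[0] = in[0]. The following is a minimal standalone sketch of that reference computation, not code from this commit: the function name computeExpectedLeaky is hypothetical, and std::vector replaces the raw new[] buffers used in the test.

#include <cstddef>
#include <vector>

// Reference recurrence for a Leaky (LIF) neuron with subtraction reset:
//   mem[t] = mem[t-1] * beta + in[t] - (mem[t-1] > threshold ? threshold : 0)
// 'input' holds nbTimeSteps frames laid out contiguously, as in the test.
std::vector<float> computeExpectedLeaky(const std::vector<float> &input,
                                        std::size_t nbTimeSteps,
                                        float beta,
                                        float threshold) {
    const std::size_t elementsPerStep = input.size() / nbTimeSteps;
    std::vector<float> result(input.size());
    // t = 0: the membrane potential starts as the first input frame.
    for (std::size_t j = 0; j < elementsPerStep; ++j) {
        result[j] = input[j];
    }
    // t > 0: decay the previous potential by beta, integrate the new input,
    // and subtract the threshold wherever the previous potential exceeded it.
    for (std::size_t t = 1; t < nbTimeSteps; ++t) {
        const std::size_t cur = t * elementsPerStep;
        const std::size_t prev = (t - 1) * elementsPerStep;
        for (std::size_t j = 0; j < elementsPerStep; ++j) {
            const float previous = result[prev + j];
            const float reset = (previous > threshold) ? threshold : 0.0f;
            result[cur + j] = previous * beta + input[cur + j] - reset;
        }
    }
    return result;
}

The test then checks this ad-hoc reference against the output recorded by the spike-record Stack node of the scheduled graph, using approxEq<float>.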