Jerome Hue / aidge_backend_cpu / Commits

Commit beaa2157, authored 3 months ago by Jerome Hue
Add accumulator test for Memorize
parent 949b80f8
Showing 3 changed files with 190 additions and 3 deletions:

src/operator/SubImpl.cpp                   +1   −1
unit_tests/operator/Test_Memorize.cpp      +189 −1
unit_tests/operator/Test_MetaOperator.cpp  +0   −1
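For orientation: the accumulator exercised by the new tests is a leaky integrator assembled from Pop, Mul, Add, and Memorize. Reading the graph wiring below, each step computes u[t] = x[t] + d·u[t-1] with decay d = 0.9, so a unit gradient pushed back through two feedback steps should come out as d² = 0.81, the value recorded in the test's // Expected gradient comment.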
src/operator/SubImpl.cpp  +1 −1
@@ -42,8 +42,8 @@ void Aidge::SubImpl_cpu::forward() {

template <>
void Aidge::SubImpl_cpu::backward() {
    Log::info("SubImpl_cpu::backward()");
    const Sub_Op& op_ = dynamic_cast<const Sub_Op&>(mOp);
    //Log::info("SubImpl_cpu::backward() : Node {}", op_.name());
    auto in0 = op_.getInput(0);
    auto in1 = op_.getInput(1);
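The hunk shows only the entry of SubImpl_cpu::backward() and its operand lookups. For reference, the standard gradient rule for c = in0 − in1 is ∂L/∂in0 = +∂L/∂c and ∂L/∂in1 = −∂L/∂c (summed over any broadcast axes), which is what a Sub backward implementation would be expected to compute.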
unit_tests/operator/Test_Memorize.cpp  +189 −1
@@ -4,11 +4,14 @@
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0.
 *
 * SPDX-License-Identifier: EPL-2.0
 *
 ********************************************************************************/

#include <aidge/operator/Identity.hpp>
#include <aidge/operator/Mul.hpp>
#include <aidge/operator/Pop.hpp>
#include <aidge/operator/Stack.hpp>
#include <memory>
#include <string>
@@ -22,6 +25,7 @@
#include "aidge/graph/OpArgs.hpp"
#include "aidge/operator/Add.hpp"
#include "aidge/operator/Memorize.hpp"
#include "aidge/operator/PerMemorize.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/recipes/GraphViewHelper.hpp"
#include "aidge/scheduler/SequentialScheduler.hpp"
@@ -63,4 +67,188 @@ TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][CPU]") {
        REQUIRE((*other == expectedOutput));
    }
}

TEST_CASE("[cpu/operator] Memorize(backward)", "[Memorize][CPU]") {
    // TODO: We will need something a bit more complex.
    SECTION("Test simple") {
        std::shared_ptr<Tensor> inputTensor = std::make_shared<Tensor>(Array1D<int, 1>{{1}});
        auto input = Producer({1}, "input");
        auto init = Producer({1}, "init");
        auto add = Add("add");
        auto mem = Memorize(3, "mem");

        input->addChild(add, 0, 0);
        init->addChild(mem, 0, 1);
        add->addChild(mem, 0, 0);
        mem->addChild(/*otherNode=*/add, /*outId=*/1, /*otherInId=*/1);

        input->getOperator()->setOutput(0, inputTensor);
        init->getOperator()->setOutput(0, inputTensor);

        auto g = getConnectedGraphView(input);
        g->setDataType(Aidge::DataType::Int32);
        g->setBackend("cpu");
        g->forwardDims();
        g->save("simple_graph");

        SequentialScheduler scheduler(g);
        REQUIRE_NOTHROW(scheduler.forward());
        scheduler.saveSchedulingDiagram("simple");

        const Tensor expectedOutput = Array1D<int, 1>{{4}};
        std::shared_ptr<Tensor> other =
            std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0);
        other->print();
        REQUIRE((*other == expectedOutput));

        // Print gradient
        Log::notice("Print gradient, before backward ");
        other->grad()->print();

        REQUIRE_NOTHROW(scheduler.backward());

        // Print gradient, after backward NOTE: what do we expect here
        Log::notice("Print gradient, after backward ");
        other->grad()->print();
    }

    SECTION("Test 2") {
        auto mul = Mul();
        auto mem = Memorize(/*endStep=*/3);
        auto add = Add();
        auto pop = Pop();

        // Initialization tensor for Memorize
        auto input = Producer(std::make_shared<Tensor>(Array1D<float, 2>({1.0f, 1.0f})), "input");
        auto decay = Producer(std::make_shared<Tensor>(Array1D<float, 2>({0.9f, 0.9f})), "decay");
        auto init = Producer(std::make_shared<Tensor>(Array1D<float, 2>({0.0f, 0.0f})), "init");
        auto back = std::make_shared<Tensor>(Array1D<float, 2>({1.0f, 1.0f}));
        auto initTensor = std::make_shared<Tensor>(Array2D<float, 3, 2>({{
            {1, 1},
            {1, 1},
            {1, 1},
        }}));
        std::static_pointer_cast<OperatorTensor>(pop->getOperator())->setInput(0, initTensor);

        init->addChild(mem, 0, 1);
        mem->addChild(mul, 1, 0);
        decay->addChild(mul, 0, 1);
        mul->addChild(add, 0, 1);
        pop->addChild(add, 0, 0);
        add->addChild(mem, 0, 0);

        auto graphView = getConnectedGraphView(mem);
        graphView->compile();
        auto scheduler = SequentialScheduler(graphView);
        scheduler.forward();

        std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->print();
        graphView->save("graphSimple2");

        // TODO: Set gradient and try to backward
        Log::warn("Printing gradient");
        std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->grad()->print();
        std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->setGrad(back);

        Log::warn("Starting Backward");
        scheduler.backward();

        // Expected gradient : 0.81
        std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getInput(0)->grad()->print();

        Log::warn("final print");
        std::static_pointer_cast<OperatorTensor>(pop->getOperator())->getInput(0)->grad()->print();

        REQUIRE(true);
    }
}

TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][Periodic]") {
    auto mul = Mul();
    //auto mem = Memorize(/*endStep=*/3);
    //auto mem = Identity();
    auto mem = PerMemorize(2);
    auto add = Add();
    auto pop = Pop();
    auto stack = Stack(3);

    // Initialization tensor for Memorize
    auto input = Producer(std::make_shared<Tensor>(Array1D<float, 2>({1.0f, 1.0f})), "input");
    auto decay = Producer(std::make_shared<Tensor>(Array1D<float, 2>({0.9f, 0.9f})), "decay");
    auto init = Producer(std::make_shared<Tensor>(Array1D<float, 2>({0.0f, 0.0f})), "init");
    auto back = std::make_shared<Tensor>(Array1D<float, 2>({1.0f, 1.0f}));
    auto initTensor = std::make_shared<Tensor>(Array2D<float, 3, 2>({{
        {1, 1},
        {1, 1},
        {1, 1},
    }}));
    std::static_pointer_cast<OperatorTensor>(pop->getOperator())->setInput(0, initTensor);

    auto memOp = std::static_pointer_cast<OperatorTensor>(mem->getOperator());
    memOp->setOutput(1, std::make_shared<Tensor>(Array1D<float, 2>({0.0f, 0.0f})));
    memOp->setOutput(0, std::make_shared<Tensor>(Array1D<float, 2>({0.0f, 0.0f})));

    //init->addChild(mem,0,0);
    mem->addChild(mul, 1, 0);
    decay->addChild(mul, 0, 1);
    mul->addChild(add, 0, 1);
    pop->addChild(add, 0, 0);
    add->addChild(mem, 0, 0);
    mem->addChild(stack, 1, 0);

    auto graphView = getConnectedGraphView(mem);
    graphView->compile();

    Log::info("GraphView output nodes : {}", graphView->outputNodes().size());
    for (auto node : graphView->outputNodes()) {
        Log::info("output node type : {}", node->type());
    }

    // TODO: Set ordered outputs for this node.
    auto scheduler = SequentialScheduler(graphView);
    scheduler.forward();

    //std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->print();
    std::static_pointer_cast<OperatorTensor>(stack->getOperator())->getOutput(0)->print();

    REQUIRE(true);
}

TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][Periodic2]") {
    auto input = Producer(std::make_shared<Tensor>(1.0f));
    auto init = Producer(std::make_shared<Tensor>(1.0f));
    auto add = Add();
    //auto mem = PerMemorize(3);
    auto mem = Memorize(3);

    input->addChild(add, 0, 0);
    add->addChild(mem, 0, 0);
    mem->addChild(add, 1, 1);
    init->addChild(mem, 0, 1);

    auto gv = getConnectedGraphView(mem);
    gv->compile();
    auto scheduler = SequentialScheduler(gv);
    scheduler.forward();

    std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->print();
    REQUIRE(true);
}
} // namespace Aidge
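The new backward sections print gradients but only assert REQUIRE(true), so the interesting constants live in comments. As a sanity check, here is a minimal standalone C++ sketch (an illustration, not part of the commit) that replays the two recurrences the tests wire up: "Test simple" computes u[t] = 1 + u[t-1] from u[0] = 1 for three steps, giving the asserted output 4, and "Test 2" computes u[t] = x[t] + d·u[t-1] with x = 1 and d = 0.9, for which a unit gradient propagated back through two feedback steps gives d² = 0.81, matching the // Expected gradient comment.

#include <cstdio>

int main() {
    // "Test simple": Memorize(3) runs three steps of u[t] = input + u[t-1]
    // with input = 1 and u[0] = init = 1, so the final value is 1 + 3*1 = 4.
    int u = 1;
    for (int step = 0; step < 3; ++step) {
        u = 1 + u;
    }
    std::printf("Test simple: %d (test asserts 4)\n", u);

    // "Test 2": Pop feeds x[t] = 1 each step, Mul applies decay d = 0.9 to the
    // memorized state, Add combines them: u[t] = x[t] + d * u[t-1], u[0] = 0.
    float v = 0.0f;
    const float d = 0.9f;
    for (int step = 0; step < 3; ++step) {
        v = 1.0f + d * v;  // forward values: 1.0, 1.9, 2.71
    }
    // A unit gradient on u[3] reaches u[1] through two multiply-by-d edges,
    // so du[3]/du[1] = d * d = 0.81, the "Expected gradient : 0.81".
    std::printf("Test 2: forward %.2f, d^2 = %.2f\n", v, d * d);
    return 0;
}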
unit_tests/operator/Test_MetaOperator.cpp  +0 −1
@@ -881,7 +881,6 @@ TEST_CASE("[cpu/operator] MetaOperator", "[Leaky][CPU][Simple]") {
        {1, 1, 1, 1},
    }});

    auto pop = Pop("pop");
    auto popOp = std::static_pointer_cast<OperatorTensor>(pop->getOperator());
    auto stack = Stack(nbTimeSteps, "stack");