Jerome Hue / aidge_backend_cpu / Commits

Commit 1f875305, authored 2 months ago by Jerome Hue

    save changes

Parent: 7db6e417
No related branches, tags, or merge requests found.
Pipeline #70075 failed 2 months ago (stages: static_analysis, build, test, coverage)
Showing 3 changed files with 11 additions and 84 deletions:

  src/operator/FCImpl.cpp                    +9  −0
  unit_tests/operator/Test_Memorize.cpp      +0  −83
  unit_tests/operator/Test_MetaOperator.cpp  +2  −1
src/operator/FCImpl.cpp (+9, −0) @ 1f875305
@@ -72,6 +72,11 @@ void Aidge::FCImpl_cpu::backward()
     const auto& input1grad = op_.getInput(1)->grad()->refCastFrom(input1gradFallback, *(op_.getOutput(0)));
     const auto& input2grad = (op_.getInput(2)) ? op_.getInput(2)->grad()->refCastFrom(input2gradFallback, *(op_.getOutput(0))) : Tensor();

+    Log::info(" ");
+    Log::info("(FCImpl.cpp) Gradient of output 0 : {}", fc_grad->toString());
+    Log::info("(FCImpl.cpp) Gradient of input 0 : {}", input0grad.toString());
+    Log::info("(FCImpl.cpp) Gradient of input 1 : {}", input1grad.toString());
+
     // Call kernel
     const auto batchSize = (input0grad.dims().size() > 1) ? input0grad.dims()[0] : 1;
     impl.backward(batchSize,
@@ -83,4 +88,8 @@ void Aidge::FCImpl_cpu::backward()
         input0grad.getImpl()->rawPtr(),
         input1grad.getImpl()->rawPtr(),
         (op_.getInput(2)) ? input2grad.getImpl()->rawPtr() : nullptr);
+
+    Log::info("(FCImpl.cpp) Gradient of output 0 : {}", fc_grad->toString());
+    Log::info("(FCImpl.cpp) Gradient of input 0 : {}", input0grad.toString());
+    Log::info("(FCImpl.cpp) Gradient of input 1 : {}", input1grad.toString());
 }
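The `(op_.getInput(2)) ? input2grad.getImpl()->rawPtr() : nullptr` argument forwards the bias-gradient buffer only when the FC node actually has a bias input. Below is a minimal sketch of how a backward kernel can honor that contract, assuming a row-major [batchSize, outChannels] output-gradient layout; the function and parameter names are illustrative, not Aidge's actual kernel signature.

    #include <cstddef>

    // Sketch only (hypothetical names, not the Aidge API): a backward kernel
    // that treats the bias gradient as optional, mirroring the nullptr
    // pattern in the diff above.
    void fc_backward_sketch(std::size_t batchSize,
                            std::size_t outChannels,
                            const float* outputGrad,
                            float* biasGrad /* nullptr when the FC has no bias */) {
        if (biasGrad == nullptr) {
            return;  // no third input on the node, nothing to accumulate
        }
        // dL/db[o] = sum over the batch of dL/dy[b][o]
        for (std::size_t o = 0; o < outChannels; ++o) {
            float acc = 0.0f;
            for (std::size_t b = 0; b < batchSize; ++b) {
                acc += outputGrad[b * outChannels + o];
            }
            biasGrad[o] = acc;
        }
    }

Passing nullptr rather than a dummy buffer keeps the optional-bias case explicit at the call site, which is the choice the diff's conditional expression reflects.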
unit_tests/operator/Test_Memorize.cpp (+0, −83) @ 1f875305
@@ -25,7 +25,6 @@
 #include "aidge/graph/OpArgs.hpp"
 #include "aidge/operator/Add.hpp"
 #include "aidge/operator/Memorize.hpp"
-#include "aidge/operator/PerMemorize.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/recipes/GraphViewHelper.hpp"
 #include "aidge/scheduler/SequentialScheduler.hpp"
@@ -169,86 +168,4 @@ TEST_CASE("[cpu/operator] Memorize(backward)", "[Memorize][CPU]") {
         }
     }
 }
-
-TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][Periodic]") {
-    auto mul = Mul();
-    //auto mem = Memorize(/*endStep=*/3);
-    //auto mem = Identity();
-    auto mem = PerMemorize(2);
-    auto add = Add();
-    auto pop = Pop();
-    auto stack = Stack(3);
-
-    // Initialization tensor for Memorize
-    auto input = Producer(std::make_shared<Tensor>(Array1D<float, 2>({1.0f, 1.0f})), "input");
-    auto decay = Producer(std::make_shared<Tensor>(Array1D<float, 2>({0.9f, 0.9f})), "decay");
-    auto init = Producer(std::make_shared<Tensor>(Array1D<float, 2>({0.0f, 0.0f})), "init");
-    auto back = std::make_shared<Tensor>(Array1D<float, 2>({1.0f, 1.0f}));
-    auto initTensor = std::make_shared<Tensor>(Array2D<float, 3, 2>({{{1, 1}, {1, 1}, {1, 1}}}));
-
-    std::static_pointer_cast<OperatorTensor>(pop->getOperator())->setInput(0, initTensor);
-    auto memOp = std::static_pointer_cast<OperatorTensor>(mem->getOperator());
-    memOp->setOutput(1, std::make_shared<Tensor>(Array1D<float, 2>({0.0f, 0.0f})));
-    memOp->setOutput(0, std::make_shared<Tensor>(Array1D<float, 2>({0.0f, 0.0f})));
-
-    //init->addChild(mem,0,0);
-    mem->addChild(mul, 1, 0);
-    decay->addChild(mul, 0, 1);
-    mul->addChild(add, 0, 1);
-    pop->addChild(add, 0, 0);
-    add->addChild(mem, 0, 0);
-    mem->addChild(stack, 1, 0);
-
-    auto graphView = getConnectedGraphView(mem);
-    graphView->compile();
-
-    Log::info("GraphView output nodes : {}", graphView->outputNodes().size());
-    for (auto node : graphView->outputNodes()) {
-        Log::info("output node type : {}", node->type());
-    }
-
-    // TODO: Set ordered outputs for this node.
-    auto scheduler = SequentialScheduler(graphView);
-    scheduler.forward();
-
-    //std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->print();
-    std::static_pointer_cast<OperatorTensor>(stack->getOperator())->getOutput(0)->print();
-    REQUIRE(true);
-}
-
-TEST_CASE("[cpu/operator] Memorize(forward)", "[Memorize][Periodic2]") {
-    auto input = Producer(std::make_shared<Tensor>(1.0f));
-    auto init = Producer(std::make_shared<Tensor>(1.0f));
-    auto add = Add();
-    //auto mem = PerMemorize(3);
-    auto mem = Memorize(3);
-
-    input->addChild(add, 0, 0);
-    add->addChild(mem, 0, 0);
-    mem->addChild(add, 1, 1);
-    init->addChild(mem, 0, 1);
-
-    auto gv = getConnectedGraphView(mem);
-    gv->compile();
-    auto scheduler = SequentialScheduler(gv);
-    scheduler.forward();
-
-    std::static_pointer_cast<OperatorTensor>(mem->getOperator())->getOutput(0)->print();
-    REQUIRE(true);
-}
 } // namespace Aidge
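For reference, the deleted [Memorize][Periodic] test wires Pop, Mul, Add, and PerMemorize into a decayed accumulator: each step adds the popped input row to decay times the previous memory. Since every tensor in the test holds two equal components, a scalar trace captures the expected values. A standalone sketch of that recurrence with the test's constants (plain C++, not the Aidge graph API):

    #include <array>
    #include <cstddef>
    #include <cstdio>

    // Sketch only: the numeric recurrence the deleted test builds as a graph,
    // computed directly. Each step does mem[t] = pop[t] + decay * mem[t-1],
    // with mem[-1] = 0, matching the Producer values in the test.
    int main() {
        const std::array<float, 3> popped = {1.0f, 1.0f, 1.0f};  // rows of initTensor
        const float decay = 0.9f;                                // "decay" Producer
        float mem = 0.0f;                                        // zero-initialized memory
        for (std::size_t t = 0; t < popped.size(); ++t) {
            mem = popped[t] + decay * mem;
            std::printf("step %zu: mem = %f\n", t, mem);  // 1.0, 1.9, 2.71
        }
        return 0;
    }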
unit_tests/operator/Test_MetaOperator.cpp (+2, −1) @ 1f875305
@@ -708,7 +708,8 @@ TEST_CASE("[cpu/operator] MetaOperator", "[MetaOperator][CPU]") {
     auto fc2 = FC(outChannels, inChannels, true, "fc2");

     // NOTE: Account for init step by adding 1 to the max timestep
     // parameter.
-    auto lif1 = Leaky(nbTimeSteps + 1, beta, threshold, LeakyReset::Subtraction, "leaky");
+    //auto lif1 = Leaky(nbTimeSteps + 1, beta, threshold, LeakyReset::Subtraction, "leaky");
+    auto lif1 = Leaky(nbTimeSteps, beta, threshold, LeakyReset::Subtraction, "leaky");

     // associateInput() does not work
     fc1->input(1).first->getOperator()->setOutput(0, myWeights);
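This change drops the `+ 1` that, per the NOTE comment, compensated for an initialization step, so the Leaky meta-operator is now built with exactly `nbTimeSteps`. A small sketch of the iteration-count difference, assuming the old scheme spent its first scheduler iteration on initialization (an interpretation of the NOTE, not confirmed by the source):

    #include <cstdio>

    // Sketch, not Aidge code: the off-by-one this diff removes. With an
    // explicit init step, the recurrent loop needs nbTimeSteps + 1
    // iterations; if the memory is pre-seeded instead, exactly nbTimeSteps
    // iterations process the data.
    int main() {
        const int nbTimeSteps = 4;
        // Old scheme: one extra iteration consumed by initialization.
        for (int t = 0; t < nbTimeSteps + 1; ++t) {
            std::printf("old scheme, iteration %d%s\n", t,
                        t == 0 ? " (init, no data)" : "");
        }
        // New scheme: every iteration consumes one input timestep.
        for (int t = 0; t < nbTimeSteps; ++t) {
            std::printf("new scheme, data step %d\n", t);
        }
        return 0;
    }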