aidge_core, commit 8fab299e
authored 1 year ago by Maxence Naud, committed by Maxence Naud 1 year ago
[Add] operator+,-,*,/ to Tensor class and [Add] gradient initialization for Tensor
parent e7c2cf76
Showing 2 changed files with 125 additions and 2 deletions:

include/aidge/data/Tensor.hpp: 120 additions, 0 deletions
src/data/Tensor.cpp: 5 additions, 2 deletions
include/aidge/data/Tensor.hpp (+120, -0)
@@ -24,6 +24,10 @@
 #include "aidge/backend/TensorImpl.hpp"
 #include "aidge/data/Data.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Div.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/Sub.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ArrayHelpers.hpp"
@@ -231,6 +235,102 @@ class Tensor : public Data,
         return *mImpl == *(otherTensor.mImpl);
     }
+    /**
+     * @brief Element-wise addition operation for two ``Tensor``s.
+     * @note ``Tensor``s should be stored on the same backend.
+     * @todo If input ``Tensor``s have a different dataType, the output should
+     * have the dataType of the ``Tensor`` with the highest precision.
+     *
+     * @param other The Tensor to add element-wise.
+     * @return Tensor The result of the addition.
+     */
+    Tensor operator+(const Tensor& other) const {
+        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+        auto add_ = Add_Op(2);
+        add_.associateInput(0, std::make_shared<Tensor>(*this));
+        add_.associateInput(1, std::make_shared<Tensor>(other));
+        add_.computeOutputDims();
+        add_.setDataType(dataType());
+        add_.setBackend(mImpl->backend());
+        add_.forward();
+        return add_.getOutput(0)->clone();
+    }
+
+    /**
+     * @brief Element-wise subtraction operation for two ``Tensor``s.
+     * @note ``Tensor``s should be stored on the same backend.
+     * @todo If input ``Tensor``s have a different dataType, the output should
+     * have the dataType of the ``Tensor`` with the highest precision.
+     *
+     * @param other The Tensor to subtract element-wise.
+     * @return Tensor The result of the subtraction.
+     */
+    Tensor operator-(const Tensor& other) const {
+        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+        auto sub_ = Sub_Op();
+        sub_.associateInput(0, std::make_shared<Tensor>(*this));
+        sub_.associateInput(1, std::make_shared<Tensor>(other));
+        sub_.computeOutputDims();
+        sub_.setDataType(dataType());
+        sub_.setBackend(mImpl->backend());
+        sub_.forward();
+        return sub_.getOutput(0)->clone();
+    }
+
+    /**
+     * @brief Element-wise multiplication operation for two ``Tensor``s.
+     * @note ``Tensor``s should be stored on the same backend.
+     * @todo If input ``Tensor``s have a different dataType, the output should
+     * have the dataType of the ``Tensor`` with the highest precision.
+     *
+     * @param other The Tensor to multiply element-wise.
+     * @return Tensor The result of the multiplication.
+     */
+    Tensor operator*(const Tensor& other) const {
+        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+        auto mul_ = Mul_Op();
+        mul_.associateInput(0, std::make_shared<Tensor>(*this));
+        mul_.associateInput(1, std::make_shared<Tensor>(other));
+        mul_.computeOutputDims();
+        mul_.setDataType(dataType());
+        mul_.setBackend(mImpl->backend());
+        mul_.forward();
+        return mul_.getOutput(0)->clone();
+    }
+
+    /**
+     * @brief Element-wise division operation for two ``Tensor``s.
+     * @note ``Tensor``s should be stored on the same backend.
+     * @todo If input ``Tensor``s have a different dataType, the output should
+     * have the dataType of the ``Tensor`` with the highest precision.
+     *
+     * @param other The Tensor to divide by, element-wise.
+     * @return Tensor The result of the division.
+     */
+    Tensor operator/(const Tensor& other) const {
+        AIDGE_ASSERT(hasImpl() && other.hasImpl(), "At least one Tensor cannot perform any binary operation because it has no implementation.");
+        AIDGE_ASSERT(mImpl->backend() == other.mImpl->backend(), "Tensors must have the same backend");
+        AIDGE_ASSERT(dataType() == other.dataType(), "Tensors must have the same data type");
+        auto div_ = Div_Op();
+        div_.associateInput(0, std::make_shared<Tensor>(*this));
+        div_.associateInput(1, std::make_shared<Tensor>(other));
+        div_.computeOutputDims();
+        div_.setDataType(dataType());
+        div_.setBackend(mImpl->backend());
+        div_.forward();
+        return div_.getOutput(0)->clone();
+    }
+
+public:
     /**
      * @brief Perform a deep copy of the tensor.
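
A quick usage sketch for the new operators (illustration only, not part of the commit). It assumes a backend module such as aidge_backend_cpu is linked so that "cpu" implementations of Add_Op, Sub_Op, Mul_Op and Div_Op are registered, and that the Array1D helper from aidge/utils/ArrayHelpers.hpp (included by this header) can construct small tensors:

    // Hypothetical example: element-wise arithmetic on two 1D float tensors.
    // Each operator builds a temporary Op, runs forward(), and returns a clone
    // of the output, so the operands themselves are left untouched.
    Aidge::Tensor a = Aidge::Array1D<float, 3>{{1.0f, 2.0f, 3.0f}};
    Aidge::Tensor b = Aidge::Array1D<float, 3>{{4.0f, 5.0f, 6.0f}};
    a.setBackend("cpu");
    b.setBackend("cpu");
    Aidge::Tensor sum  = a + b;   // {5, 7, 9}
    Aidge::Tensor diff = a - b;   // {-3, -3, -3}
    Aidge::Tensor prod = a * b;   // {4, 10, 18}
    Aidge::Tensor quot = a / b;   // {0.25, 0.4, 0.5}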
@@ -461,6 +561,26 @@ public:
         return mGrad;
     }
+    /**
+     * @brief Associate the gradient with a Tensor instance and set its implementation
+     * if none was previously set.
+     * @note Dimensions for the Tensor instance are copied from the current Tensor.
+     * @note If a Tensor instance was already associated, only the implementation is
+     * created, with values set to 0.
+     * @note If a Tensor instance and an implementation already existed for the
+     * gradient, nothing is done.
+     */
+    void initGradient() {
+        if (!mGrad) {
+            mGrad = std::make_shared<Tensor>(mDims);
+        }
+        if (!mGrad->hasImpl()) {
+            mGrad->setDataType(dataType());
+            mGrad->setBackend(hasImpl() ? mImpl->backend() : "cpu");
+            mGrad->zeros();
+        }
+    }
+
     /**
      * @brief From the 1D contiguous index, return the coordinate of an element in the tensor.
      * Beware: do not use this function with the storage index!
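
A short sketch of the new gradient initialization (illustration only, not part of the commit). It assumes grad() is the accessor behind the "return mGrad;" context above and that a "cpu" backend is available:

    // Hypothetical example: initGradient() lazily creates a zero-filled
    // gradient with the same dims, data type and backend as the tensor,
    // falling back to "cpu" when the tensor has no implementation yet.
    Aidge::Tensor w = Aidge::Array1D<float, 2>{{0.5f, -1.5f}};
    w.setBackend("cpu");
    w.initGradient();        // gradient allocated and zeroed on "cpu"
    auto grad = w.grad();    // assumption: returns the shared_ptr<Tensor> gradient
    w.initGradient();        // no-op: instance and implementation already exist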
src/data/Tensor.cpp (+5, -2)
@@ -19,6 +19,9 @@
 #include "aidge/utils/Types.h"
 
 Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
+    if (this == &other) {
+        return *this;
+    }
     resize(other.dims(), other.strides());
     setDataType(other.dataType(), false); // do not convert existing data
     if (other.hasImpl()) {
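
For illustration (not part of the commit), the new self-assignment guard makes assigning a tensor to itself a safe no-op:

    // Hypothetical example: without the guard, "t = t;" would resize and
    // copy the tensor onto itself; with it, operator= returns *this at once.
    Aidge::Tensor t = Aidge::Array1D<int, 2>{{1, 2}};
    t = t;  // early return, no resize or copy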
@@ -253,7 +256,7 @@ void Aidge::Tensor::copyCast(const Tensor& src) {
     AIDGE_ASSERT(src.isContiguous(), "cannot copy-cast non-contiguous tensor");
 
     // Current Tensor has necessarily a data type, but may not have backend
-    if (!getImpl()) {
+    if (!hasImpl()) {
         // If no backend was set for the current tensor, use the same as src
         const auto deviceSrc = src.getImpl()->device();
         setBackend(deviceSrc.first, deviceSrc.second);
@@ -272,7 +275,7 @@ void Aidge::Tensor::copyFrom(const Tensor& src) {
     AIDGE_ASSERT(src.isContiguous(), "cannot copy from non-contiguous tensor");
 
     // Current Tensor has necessarily a data type, but may not have backend
-    if (!getImpl()) {
+    if (!hasImpl()) {
         // If no backend was set for the current tensor, use the same as src
         const auto deviceSrc = src.getImpl()->device();
         setBackend(deviceSrc.first, deviceSrc.second);