Project: Eclipse Projects / aidge / aidge_backend_cpu

Commit de64d546
Authored 1 year ago by Olivier BICHLER

    Improved implementation, tests failing don't know why

Parent: 90a2c09e
No related branches found. No related tags found.
Merge requests: !29 "Temporary master branch", !26 "Draft: Add Convert operator (a.k.a. Transmitter)"
Pipeline #35436 failed 1 year ago (stages: build, test)
Showing 1 changed file with 30 additions and 17 deletions.

include/aidge/backend/cpu/data/TensorImpl.hpp  +30 −17
@@ -6,6 +6,7 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/future_std/span.hpp"
 
 namespace Aidge {
 template <class T>
@@ -13,7 +14,10 @@ class TensorImpl_cpu : public TensorImpl {
 private:
     const Tensor &mTensor;  // Impl needs to access Tensor information, but is not
                             // supposed to change it!
-    std::vector<T> mData;
+    /// Pointer to the data and its capacity
+    future_std::span<T> mData;
+    /// If this instance own the data, std::unique_ptr manages it
+    std::unique_ptr<T[]> mDataOwner;
 
 public:
     static constexpr const char *Backend = "cpu";
@@ -21,9 +25,12 @@ class TensorImpl_cpu : public TensorImpl {
     TensorImpl_cpu(const Tensor &tensor) : TensorImpl(Backend), mTensor(tensor) {}
 
     bool operator==(const TensorImpl &otherImpl) const override final {
+        const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T>&>(otherImpl);
+        AIDGE_INTERNAL_ASSERT(typedOtherImpl.data().size() >= mTensor.size());
+
         std::size_t i = 0;
         for (; i < mTensor.size() &&
-               mData[i] == reinterpret_cast<const TensorImpl_cpu<T>&>(otherImpl).data()[i];
+               mData[i] == typedOtherImpl.data()[i];
                ++i) {
         }
         return i == mTensor.size();
@@ -34,7 +41,7 @@ class TensorImpl_cpu : public TensorImpl {
     }
 
     // native interface
-    const std::vector<T> &data() const { return mData; }
+    const future_std::span<T>& data() const { return mData; }
 
     std::size_t scalarSize() const override { return sizeof(T); }
@@ -110,41 +117,47 @@ class TensorImpl_cpu : public TensorImpl {
     }
 
     void *rawPtr() override {
-        lazyInit(mData);
+        lazyInit();
         return mData.data();
     };
 
     const void *rawPtr() const override {
-        AIDGE_ASSERT(mData.size() == mTensor.size(), "accessing uninitialized const rawPtr");
+        AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const rawPtr");
         return mData.data();
     };
 
     void *hostPtr() override {
-        lazyInit(mData);
+        lazyInit();
         return mData.data();
    };
 
     const void *hostPtr() const override {
-        AIDGE_ASSERT(mData.size() == mTensor.size(), "accessing uninitialized const hostPtr");
+        AIDGE_ASSERT(mData.size() >= mTensor.size(), "accessing uninitialized const hostPtr");
         return mData.data();
     };
 
-    void *getRaw(std::size_t idx){
-        return static_cast<void*>(static_cast<T *>(rawPtr()) + idx);
-    };
+    void *getRaw(std::size_t idx) {
+        AIDGE_ASSERT(idx < mData.size(), "idx out of range");
+        return static_cast<void*>(static_cast<T *>(rawPtr()) + idx);
+    };
 
     virtual ~TensorImpl_cpu() = default;
 
-    void setRawPtr(void *ptr) override final {
-        T *newPtr = static_cast<T *>(ptr);
-        mData = std::vector<T>(newPtr, newPtr + mTensor.size());
+    void setRawPtr(void *ptr, NbElts_t length) override final {
+        AIDGE_ASSERT(length >= mTensor.size(), "trying to set raw pointer of insufficient capacity");
+        mData = future_std::span<T>(static_cast<T *>(ptr), length);
     };
 
-private:
-    void lazyInit(std::vector<T> &data) {
-        assert(mTensor.dataType() == NativeType<T>::type);
-        if (data.size() != mTensor.size()) data.resize(mTensor.size());
+private:
+    void lazyInit() {
+        AIDGE_INTERNAL_ASSERT(mTensor.dataType() == NativeType<T>::type);
+        if (mData.size() < mTensor.size()) {
+            // Need more data, a re-allocation will occur
+            AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");
+            mDataOwner.reset(new T[mTensor.size()]);
+            mData = future_std::span<T>(mDataOwner.get(), mTensor.size());
+        }
     }
 };
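For readers unfamiliar with the pattern this change introduces (a non-owning view over the tensor data paired with optional ownership), here is a minimal standalone sketch. It is not Aidge code: the SpanBuffer class and its member names are hypothetical, and a raw pointer plus a size stands in for future_std::span. It only illustrates how a lazyInit-style re-allocation and an externally provided buffer can coexist behind the same view, under the same invariant used above (only owned or empty data may be enlarged).

// Hypothetical illustration only (not part of Aidge): a view over a data buffer
// that may optionally own the memory it points to, mirroring the mData/mDataOwner
// pair introduced by this commit.
#include <cassert>
#include <cstddef>
#include <iostream>
#include <memory>

template <class T>
class SpanBuffer {
public:
    // Point the view at an externally owned buffer of `length` elements.
    // The caller keeps ownership; we only check that the capacity is sufficient.
    void setExternal(T* ptr, std::size_t length, std::size_t required) {
        assert(length >= required && "trying to set raw pointer of insufficient capacity");
        mOwner.reset();   // drop any storage we previously owned
        mPtr = ptr;
        mSize = length;
    }

    // Allocate owned storage lazily when the current view is too small.
    // Enlarging is only allowed if the view is empty or the data is already ours.
    T* lazyInit(std::size_t required) {
        if (mSize < required) {
            assert((mSize == 0 || mOwner != nullptr) && "trying to enlarge non-owned data");
            mOwner.reset(new T[required]());
            mPtr = mOwner.get();
            mSize = required;
        }
        return mPtr;
    }

    std::size_t size() const { return mSize; }

private:
    T* mPtr = nullptr;            // start of the viewed data (like future_std::span<T>)
    std::size_t mSize = 0;        // number of elements visible through the view
    std::unique_ptr<T[]> mOwner;  // non-null only when this instance owns the data
};

int main() {
    SpanBuffer<float> buf;
    buf.lazyInit(4)[0] = 1.0f;       // first access: 4 owned elements are allocated

    float external[8] = {};
    buf.setExternal(external, 8, 4); // switch to a non-owned buffer of larger capacity
    std::cout << buf.size() << '\n'; // prints 8
    return 0;
}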