Skip to content
Snippets Groups Projects
Commit 8cfcd350 authored by Thibault Allenet's avatar Thibault Allenet
Browse files

Update TensorImpl constructor to take the tensor dimensions instead of the...

Update TensorImpl constructor to take the tensor dimensions instead of the number of elements. Necessary for backend_opencv.
parent 60d17a2f
No related branches found
No related tags found
No related merge requests found
......@@ -67,7 +67,10 @@ private:
class TensorImpl {
public:
// A TensorImpl is always bound to a concrete (backend, device) pair, so
// default construction is disallowed.
TensorImpl() = delete;
// NOTE(review): legacy constructor taking a flat element count; the commit
// introduces a dims-based overload below — presumably this line is the
// removed side of the diff and should not remain alongside it. Verify.
TensorImpl(const char *backend, DeviceIdx_t device, NbElts_t length) : mBackend(backend), mDevice(device), mNbElts(length) {};
/**
 * @brief Construct an implementation bound to a (backend, device) pair and
 *        derive the stored element count from the tensor dimensions.
 * @param backend Name of the backend owning this implementation.
 * @param device  Index of the device that will hold the data.
 * @param dims    Tensor dimensions; the element count is computed by resize().
 *
 * The dimensions are taken by const reference: the original signature took
 * the vector by value and then copied it a second time into resize(), which
 * also takes its parameter by value. Callers are unaffected by this change.
 *
 * NOTE(review): resize() is virtual, but virtual dispatch does not apply
 * while the base class is under construction — the TensorImpl::resize body
 * runs here, not a derived backend's override.
 */
TensorImpl(const char *backend, DeviceIdx_t device, const std::vector<DimSize_t>& dims) : mBackend(backend), mDevice(device)
{
    resize(dims);
}
/**
* Return the (backend, device) pair for this implementation.
......@@ -147,8 +150,12 @@ public:
/**
* Set the size, in number of elements, that must be stored.
*/
void resize(NbElts_t length) {
mNbElts = length;
/**
 * @brief Recompute the number of elements that must be stored, given the
 *        tensor dimensions.
 * @param dims Dimensions of the tensor; an empty vector yields an element
 *             count of 1 (scalar semantics of an empty product).
 */
virtual void resize(std::vector<DimSize_t> dims) {
    // The element count is the product of all dimension sizes.
    size_t nbElements = 1;
    for (const size_t dimSize : dims) {
        nbElements *= dimSize;
    }
    mNbElts = nbElements;
}
/**
......
......@@ -32,7 +32,7 @@ namespace Aidge {
* Contains a pointer to an actual contiguous implementation of data.
*/
class Tensor : public Data,
public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)> {
public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> {
private:
DataType mDataType; /** enum to specify data type. */
std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */
......@@ -92,7 +92,7 @@ class Tensor : public Data,
newTensor.makeContiguous();
}
else {
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mSize);
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
newTensor.setImpl(newImpl);
}
......@@ -110,7 +110,7 @@ class Tensor : public Data,
mDataType(NativeType<T>::type),
mDims({SIZE_0}),
mStrides({1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0)),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
mSize(SIZE_0) {
mImpl->copyFromHost(&arr.data[0], SIZE_0);
}
......@@ -119,7 +119,7 @@ class Tensor : public Data,
constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
resize({SIZE_0});
if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0);
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0});
}
mImpl->copyFromHost(&arr.data[0], SIZE_0, mImplOffset);
return *this;
......@@ -137,7 +137,7 @@ class Tensor : public Data,
mDataType(NativeType<T>::type),
mDims({SIZE_0, SIZE_1}),
mStrides({SIZE_1, 1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1)),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1})),
mSize(SIZE_0 * SIZE_1) {
mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
}
......@@ -146,7 +146,7 @@ class Tensor : public Data,
constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
resize({SIZE_0, SIZE_1});
if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1);
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1});
}
mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1, mImplOffset);
return *this;
......@@ -165,7 +165,7 @@ class Tensor : public Data,
mDataType(NativeType<T>::type),
mDims({SIZE_0, SIZE_1, SIZE_2}),
mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2)),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2})),
mSize(SIZE_0 * SIZE_1 * SIZE_2) {
mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
}
......@@ -174,7 +174,7 @@ class Tensor : public Data,
constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
resize({SIZE_0, SIZE_1, SIZE_2});
if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2);
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2});
}
mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2, mImplOffset);
return *this;
......@@ -194,7 +194,7 @@ class Tensor : public Data,
mDataType(NativeType<T>::type),
mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3)),
mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3) {
mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
}
......@@ -203,7 +203,7 @@ class Tensor : public Data,
constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
if (!mImpl) {
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3});
}
mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3, mImplOffset);
return *this;
......@@ -264,7 +264,7 @@ class Tensor : public Data,
if (mImpl->device() != std::make_pair(name, device)) {
// Backend change: create new impl, copy from old to new and replace
// impl
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mImpl->size());
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
if (copyFrom) {
newImpl->copyFrom(*mImpl, mImpl->size(), mImplOffset, 0);
}
......@@ -272,7 +272,7 @@ class Tensor : public Data,
}
}
else {
mImpl = Registrar<Tensor>::create({name, mDataType})(device, mSize);
mImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
}
}
......@@ -302,7 +302,7 @@ class Tensor : public Data,
*/
void setDataType(const DataType dt, bool copyCast = true) {
if (mImpl && (dataType() != dt)) {
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(mImpl->device().second, mImpl->size());
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(mImpl->device().second, mDims);
if (copyCast) {
newImpl->copyCast(mImpl->rawPtr(mImplOffset), mDataType, mImpl->size());
}
......@@ -442,7 +442,7 @@ class Tensor : public Data,
computeSize();
if (mImpl) {
mImpl->resize(mSize);
mImpl->resize(mDims);
}
}
}
......
......@@ -44,7 +44,7 @@ void Aidge::Tensor::makeContiguous() {
// Block so that mImpl ref count is 1 for resize()
{
// Create a new storage that will be contiguous
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mSize);
std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
// Copy elements from old to new storage
size_t idx = 0;
while (idx < mSize) {
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment