From 8cfcd350f67ae59e1d1e1154457d0d058d242dcd Mon Sep 17 00:00:00 2001
From: thibault allenet <thibault.allenet@cea.fr>
Date: Mon, 12 Feb 2024 14:23:00 +0000
Subject: [PATCH] Update TensorImpl constructor to take the tensor dimensions
 instead of the number of elements. Necessary for backend_opencv.

---
 include/aidge/backend/TensorImpl.hpp | 13 ++++++++++---
 include/aidge/data/Tensor.hpp        | 28 ++++++++++++++--------------
 src/data/Tensor.cpp                  |  2 +-
 3 files changed, 25 insertions(+), 18 deletions(-)

diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 62f13acb3..8539c8e36 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -67,7 +67,10 @@ private:
 class TensorImpl {
 public:
     TensorImpl() = delete;
-    TensorImpl(const char *backend, DeviceIdx_t device, NbElts_t length) : mBackend(backend), mDevice(device), mNbElts(length) {};
+    TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims) : mBackend(backend), mDevice(device)
+    {
+        resize(dims);
+    }
 
     /**
      * Return the (backend, device) pair for this implementation.
@@ -147,8 +150,12 @@ public:
     /**
-     * Set the size, in number of elements, that must be stored.
+     * Set the dimensions of the data to be stored; the element count is their product.
     */
-    void resize(NbElts_t length) {
-        mNbElts = length;
+    virtual void resize(std::vector<DimSize_t> dims) {
+        size_t product = 1;
+        for (size_t num : dims) {
+            product *= num;
+        }
+        mNbElts = product;
     }
 
     /**
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 059238be0..978a85046 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -32,7 +32,7 @@ namespace Aidge {
  * Contains a pointer to an actual contiguous implementation of data.
  */
 class Tensor : public Data,
-               public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)> {
+               public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> {
    private:
     DataType mDataType; /** enum to specify data type. */
     std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */
@@ -92,7 +92,7 @@ class Tensor : public Data,
             newTensor.makeContiguous();
         }
         else {
-            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mSize);
+            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
             newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
             newTensor.setImpl(newImpl);
         }
@@ -110,7 +110,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0}),
           mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
           mSize(SIZE_0) {
         mImpl->copyFromHost(&arr.data[0], SIZE_0);
     }
@@ -119,7 +119,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
         resize({SIZE_0});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0});
         }
         mImpl->copyFromHost(&arr.data[0], SIZE_0, mImplOffset);
         return *this;
@@ -137,7 +137,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0, SIZE_1}),
           mStrides({SIZE_1, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1})),
           mSize(SIZE_0 * SIZE_1) {
         mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
     }
@@ -146,7 +146,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
         resize({SIZE_0, SIZE_1});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1});
         }
         mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1, mImplOffset);
         return *this;
@@ -165,7 +165,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0, SIZE_1, SIZE_2}),
           mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2})),
           mSize(SIZE_0 * SIZE_1 * SIZE_2) {
         mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
     }
@@ -174,7 +174,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
         resize({SIZE_0, SIZE_1, SIZE_2});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2});
         }
         mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2, mImplOffset);
         return *this;
@@ -194,7 +194,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
           mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
           mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3) {
         mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
     }
@@ -203,7 +203,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
         resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3});
         }
         mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3, mImplOffset);
         return *this;
@@ -264,7 +264,7 @@ class Tensor : public Data,
             if (mImpl->device() != std::make_pair(name, device)) {
                 // Backend change: create new impl, copy from old to new and replace
                 // impl
-                std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mImpl->size());
+                std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
                 if (copyFrom) {
                     newImpl->copyFrom(*mImpl, mImpl->size(), mImplOffset, 0);
                 }
@@ -272,7 +272,7 @@ class Tensor : public Data,
             }
         }
         else {
-            mImpl = Registrar<Tensor>::create({name, mDataType})(device, mSize);
+            mImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
         }
     }
 
@@ -302,7 +302,7 @@ class Tensor : public Data,
      */
     void setDataType(const DataType dt, bool copyCast = true) {
         if (mImpl && (dataType() != dt)) {
-            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(mImpl->device().second, mImpl->size());
+            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(mImpl->device().second, mDims);
             if (copyCast) {
                 newImpl->copyCast(mImpl->rawPtr(mImplOffset), mDataType, mImpl->size());
             }
@@ -442,7 +442,7 @@ class Tensor : public Data,
 
             computeSize();
             if (mImpl) {
-                mImpl->resize(mSize);
+                mImpl->resize(mDims);
             }
         }
     }
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index d45dee563..108541536 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -44,7 +44,7 @@ void Aidge::Tensor::makeContiguous() {
     // Block so that mImpl ref count is 1 for resize()
     {
         // Create a new storage that will be contiguous
-        std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mSize);
+        std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
         // Copy elements from old to new storage
         size_t idx = 0;
         while (idx < mSize) {
-- 
GitLab