From f3ab1b2e506c0a63665bc6f3cb53a78c97a383d7 Mon Sep 17 00:00:00 2001
From: ls232920 <laurent.soulier@cea.fr>
Date: Tue, 17 Oct 2023 09:20:11 +0000
Subject: [PATCH] [IMPR][MAJ] removing reference to Tensor object inside
 TensorImpl and improving creation interface

---
 include/aidge/backend/TensorImpl.hpp  |  83 +++++++++----
 include/aidge/data/Data.hpp           | 166 ++++++++++++++++++++++++--
 include/aidge/data/Tensor.hpp         |  63 +++++++---
 include/aidge/utils/TensorUtils.hpp   |  30 +++--
 python_binding/data/pybind_Tensor.cpp | 163 +++++++++++++------------
 src/data/Tensor.cpp                   |  14 ++-
 6 files changed, 369 insertions(+), 150 deletions(-)

diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 794ce4699..b93e0c044 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -26,11 +26,29 @@ class TensorImpl
 {
 public:
     TensorImpl() = delete;
-    /// @brief Construction for a given Tensor, with a given backend.
-    /// @param backend Name of the chosen backend
-    /// @param i_Tensor Reference to the i_Tensor describing the data.
-    TensorImpl(const char *backend, Tensor const &i_Tensor) :
-        mBackend(backend), mTensor(i_Tensor){};
+    /// @brief Construction for a given data type, with a given backend.
+    /// @param backend Name of the chosen backend.
+    /// @param i_DataType Data type to be stored.
+    /// @param i_FirstDataCoordinates Logical coordinates of the data at null natural
+    /// coordinates
+    /// @param i_Dimensions Tensor dimensions
+    /// @sa Coord_t
+    TensorImpl(
+        const char *backend,
+        DataType const i_DataType,
+        std::vector<Coord_t> const &i_FirstDataCoordinates,
+        std::vector<DimSize_t> const &i_Dimensions) :
+        mBackend(backend),
+        mDataType(i_DataType),
+        mvFirstDataCoordinates(i_FirstDataCoordinates),
+        mvDimensions(i_Dimensions)
+    {
+        assert(
+            mvDimensions.size() == mvFirstDataCoordinates.size()
+            && "Tensors origin coordinates and dimensions must have the same size");
+        mScalarSize = detail::sizeOf(mDataType);
+        computeLayout();
+    };
     /// @brief Creates a new TensorImpl with same properties as self
     /// @details Copy all characteristics of calling TensorImpl and its data (deep copy).
     /// @return Pointer to a copy of the TensorImpl object
@@ -134,10 +152,10 @@ public:
         return mNbElts;
     };
 
-    /// @brief Returns the reference to the Tensor
-    inline Tensor const &getTensor() const noexcept
+    /// @brief Returns stored data type
+    inline DataType getDataType() const noexcept
     {
-        return mTensor;
+        return mDataType;
     };
 
     /// @brief Get the logical coordinates of the data at given index
@@ -186,7 +204,7 @@ public:
     }
 
     /// @brief Change TensorImpl dimensions
-    /// @note Current API does not change first data coordinates
+    /// @note Current API resets origin coordinates to null values
     /// @note Preexisting data are lost whatever dims value
     /// @note If new dimensions are not large enough to hold the Tensors that
     /// referenced this implementation is undefined behavior
@@ -195,31 +213,29 @@ public:
     virtual void resize(const std::vector<DimSize_t> &dims) = 0;
 
 private:
+    /// @brief Desired backend identified as a string
+    const char *mBackend = nullptr;
     /// @brief Copy of the Tensor data type.
     /// @details The purpose of this copy is to remove the need to access the Tensor
     /// object
     /// @todo Is there a way to avoid this copy?
     DataType mDataType = DataType::Undefined;
-    /// @brief Desired backend identified as a string
-    const char *mBackend = nullptr;
-    /// @brief Tensor whose the TensorImpl manage the memory
-    /// @deprecated Future version may be referenced by several Tensors
-    /// @todo edesign must be
-    /// considered so that a TensorImpl either does not know of its user or it has a
-    /// list of all of them
-    Tensor const &mTensor;
+    std::size_t mScalarSize = 0;
+    /// @brief Logical coordinates of the data stored at null natural coordinates.
+    /// @sa Coord_t
+    std::vector<Coord_t> mvFirstDataCoordinates;
+    /// @brief Actual in-memory tensor dimensions expressed as number of elements
+    /// along each dimension
+    std::vector<DimSize_t> mvDimensions;
     /// @brief Number of stored data
-    /// @details mNbElts == prod(mDimensions)/mScalarSize
+    /// @details mNbElts == prod(mDimensions)<br>
+    /// A null value indicates that no data has been actually allocated
     /// @todo Implement overflow protection as mNbElts*mScalarSize must fit into a
     /// std::size_t
     NbElts_t mNbElts = 0;
     /// @brief Size, in bytes, of a single data
     /// @todo Implement overflow protection as mNbElts*mScalarSize must fit into a
     /// std::size_t
-    std::size_t mScalarSize = 0;
-    /// @brief Actual in-memory tensor dimensions expressed as number of elements
-    /// along each dimension
-    std::vector<DimSize_t> mvDimensions;
     /// @brief memory tensor layout
     /// @details Let ptr be the memory address of a data of logical coordinates
     /// (x;y;z) in a 3D tensor, casted to Byte_t *.<br> Let vLayout be the
@@ -238,8 +254,6 @@ private:
     /// @todo If needed, replace by a _any storing a backend-dependant handle on
     /// storage.
     Byte_t *mStorage = nullptr;
-    /// @brief Logical coordinates of the data stored at mStorage.
-    std::vector<Coord_t> mvFirstDataCoordinates;
 
 protected:
     /// @brief Number of element setter for inherited classes
@@ -301,6 +315,21 @@ protected:
     {
         mvFirstDataCoordinates = iCoords;
     }
+    /// @brief Computes the number of elements the Tensor is supposed to store
+    NbElts_t size()
+    {
+        return std::accumulate(
+            std::begin(getDimensions()),
+            std::end(getDimensions()),
+            NbElts_t(1),
+            std::multiplies<NbElts_t>());
+    }
+    /// @brief Computes and sets the number of elements the Tensor is supposed to store
+    /// @note To be called only after the data has effectively been allocated.
+    void computeNbElts()
+    {
+        mNbElts = size();
+    }
     /// @brief Computes layout from Tensor total dimensions and size of a single data.
     /// @note Assumes that the Tensor dimensions and data type are valid.
     /// @note Should be used only during (re)initialization of storage.
@@ -308,6 +337,10 @@ protected:
     {
         std::size_t NbDims = mvDimensions.size();
         mvLayout.resize(NbDims);
+        if (NbDims == 0)
+        {
+            return;
+        }
 
         mvLayout[NbDims - 1] = mScalarSize;
         for (std::size_t i = 0; i < NbDims - 1; ++i)
@@ -326,7 +359,7 @@ protected:
         //     *pLayout = (*pPrevLayout) * (*pDimension);
         // }
         assert(
-            mvLayout[0] * mvDimensions[0] == mNbElts * mvLayout[NbDims - 1]
+            mvLayout[0] * mvDimensions[0] == size() * mvLayout[NbDims - 1]
             && "Error while computing layout");
     }
     ///  @brief Copy all the characteristics of a given TensorImpl but not its data
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 6810ac590..5abac778b 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -12,6 +12,8 @@
 #ifndef AIDGE_DATA_H_
 #define AIDGE_DATA_H_
 
+#include <type_traits>
+
 #include "aidge/utils/Attributes.hpp"
 
 namespace Aidge
@@ -73,20 +75,163 @@ private:
 // required though pure virtual
 // inline to avoid redefinition
 inline Data::~Data() noexcept = default;
-} // namespace Aidge
 
-namespace
+namespace detail
 {
 template<typename T> struct NativeType
 {
-    static const Aidge::DataType type;
+    // generic case not implemented;
+    static_assert(!std::is_same<T, T>::value, "NativeType not supported on given type");
 };
-template<> const Aidge::DataType NativeType<double>::type = Aidge::DataType::Float64;
-template<> const Aidge::DataType NativeType<float>::type = Aidge::DataType::Float32;
-template<> const Aidge::DataType NativeType<long>::type = Aidge::DataType::Int64;
-template<> const Aidge::DataType NativeType<int>::type = Aidge::DataType::Int32;
-template<>
-const Aidge::DataType NativeType<std::uint16_t>::type = Aidge::DataType::UInt16;
+template<> struct NativeType<double>
+{
+    static constexpr DataType aidgeType = DataType::Float64;
+};
+template<> struct NativeType<float>
+{
+    static constexpr DataType aidgeType = DataType::Float32;
+};
+template<> struct NativeType<std::int8_t>
+{
+    static constexpr DataType aidgeType = DataType::Int8;
+};
+template<> struct NativeType<std::uint8_t>
+{
+    static constexpr DataType aidgeType = DataType::UInt8;
+};
+template<> struct NativeType<std::int16_t>
+{
+    static constexpr DataType aidgeType = DataType::Int16;
+};
+template<> struct NativeType<std::uint16_t>
+{
+    static constexpr DataType aidgeType = DataType::UInt16;
+};
+template<> struct NativeType<std::int32_t>
+{
+    static constexpr DataType aidgeType = DataType::Int32;
+};
+template<> struct NativeType<std::uint32_t>
+{
+    static constexpr DataType aidgeType = DataType::UInt32;
+};
+template<> struct NativeType<std::int64_t>
+{
+    static constexpr DataType aidgeType = DataType::Int64;
+};
+template<> struct NativeType<std::uint64_t>
+{
+    static constexpr DataType aidgeType = DataType::UInt64;
+};
+template<typename T> constexpr DataType NativeType_v = NativeType<T>::aidgeType;
+
+template<DataType E> struct CppType
+{
+    // generic case not implemented;
+    static_assert(!(E == E), "CppType not supported on given aidge type");
+};
+template<> struct CppType<DataType::Float64>
+{
+    using type = double;
+};
+template<> struct CppType<DataType::Float32>
+{
+    using type = float;
+};
+template<> struct CppType<DataType::Int8>
+{
+    using type = std::int8_t;
+};
+template<> struct CppType<DataType::UInt8>
+{
+    using type = std::uint8_t;
+};
+template<> struct CppType<DataType::Int16>
+{
+    using type = std::int16_t;
+};
+template<> struct CppType<DataType::UInt16>
+{
+    using type = std::uint16_t;
+};
+template<> struct CppType<DataType::Int32>
+{
+    using type = std::int32_t;
+};
+template<> struct CppType<DataType::UInt32>
+{
+    using type = std::uint32_t;
+};
+template<> struct CppType<DataType::Int64>
+{
+    using type = std::int64_t;
+};
+template<> struct CppType<DataType::UInt64>
+{
+    using type = std::uint64_t;
+};
+template<DataType E> using CppType_t = typename CppType<E>::type;
+constexpr std::size_t sizeOf(DataType const i_dataType)
+{
+    switch (i_dataType)
+    {
+        case DataType::Float64:
+        {
+            return sizeof(CppType_t<DataType::Float64>);
+            break;
+        }
+        case DataType::Float32:
+        {
+            return sizeof(CppType_t<DataType::Float32>);
+            break;
+        }
+        case DataType::Int8:
+        {
+            return sizeof(CppType_t<DataType::Int8>);
+            break;
+        }
+        case DataType::UInt8:
+        {
+            return sizeof(CppType_t<DataType::UInt8>);
+            break;
+        }
+        case DataType::Int16:
+        {
+            return sizeof(CppType_t<DataType::Int16>);
+            break;
+        }
+        case DataType::UInt16:
+        {
+            return sizeof(CppType_t<DataType::UInt16>);
+            break;
+        }
+        case DataType::Int32:
+        {
+            return sizeof(CppType_t<DataType::Int32>);
+            break;
+        }
+        case DataType::UInt32:
+        {
+            return sizeof(CppType_t<DataType::UInt32>);
+            break;
+        }
+        case DataType::Int64:
+        {
+            return sizeof(CppType_t<DataType::Int64>);
+            break;
+        }
+        case DataType::UInt64:
+        {
+            return sizeof(CppType_t<DataType::UInt64>);
+            break;
+        }
+        default:
+        {
+            assert(false && "sizeOf called on unrecognized data type");
+            return 0;
+        }
+    }
+}
 
 template<>
 const char* const EnumStrings<Aidge::DataType>::data[]
@@ -94,6 +239,7 @@ const char* const EnumStrings<Aidge::DataType>::data[]
        "Int3",    "Int4",    "Int5",    "Int6",     "Int7",   "Int8",    "Int16",
        "Int32",   "Int64",   "UInt2",   "UInt3",    "UInt4",  "UInt5",   "UInt6",
        "UInt7",   "UInt8",   "UInt16",  "UInt32",   "UInt64"};
-} // namespace
+} // namespace detail
+} // namespace Aidge
 
 #endif /* AIDGE_DATA_H_ */
\ No newline at end of file
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index de02654d8..caca9fbeb 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -68,7 +68,10 @@ class Tensor : public Data,
                public Registrable<
                    Tensor,
                    std::tuple<std::string, DataType>,
-                   detail::pimpl::ImplPtr_t(const Tensor &)>
+                   detail::pimpl::ImplPtr_t(
+                       DataType const i_DataType,
+                       std::vector<Coord_t> const &i_FirstDataCoordinates,
+                       std::vector<DimSize_t> const &i_Dimensions)>
 {
 private:
     ///@brief Copy data from a raw buffer inside the Tensor data implementation
@@ -77,15 +80,16 @@ private:
     void copyData(Byte_t const *const srcPtr, NbElts_t const Length);
     /// @brief enum to specify data type.
     DataType mDataType = DataType::Undefined;
+    /// @brief Logical coordinates of the first data (lexicographic order) of the tensor
+    /// active area
+    /// @sa Coord_t
+    std::vector<Coord_t> mvActiveAreaOrigin = {};
     /// @brief Dimensions of the active area.
-    std::vector<DimSize_t> mDims;
+    std::vector<DimSize_t> mDims = {};
     /// @brief Pointer to the actual data implementation.
     detail::pimpl::ImplPtr_t mImpl = nullptr;
     /// @brief Pointer to the associated gradient Tensor instance.
     std::shared_ptr<Tensor> mGrad = nullptr;
-    /// @brief Logical coordinates of the first data (lexicographic order) of the tensor
-    /// active area
-    std::vector<Coord_t> mActiveAreaOrigin;
     /// @brief Valid neighborhood around active data
     Context mContext;
 
@@ -104,7 +108,7 @@ public:
      * @param dataType Sets the type of inserted data.
      */
     explicit Tensor(DataType dataType = DataType::Float32) :
-        Data(Type), mDataType(dataType), mDims({}), mContext{}
+        Data(Type), mDataType(dataType), mvActiveAreaOrigin({}), mDims({}), mContext{}
     {
         // ctor
     }
@@ -125,15 +129,20 @@ public:
     template<typename T, std::size_t SIZE_0>
     constexpr Tensor(Array1D<T, SIZE_0> &&arr) :
         Data(Type),
-        mDataType(NativeType<T>::type),
+        mDataType(detail::NativeType_v<T>),
+        mvActiveAreaOrigin(1, 0),
         // for aidge, on msvc, wrong constructor called, initializes a vector of size
-        // SIZE_0 instead of a single value vector holding SIZE_0 mDims({SIZE_0}),
-        mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
+        // SIZE_0 instead of a single value vector holding SIZE_0
+        // mDims({SIZE_0}),
+        // mImpl(Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
+        //     mDataType, mvActiveAreaOrigin, mDims)),
         mSize(SIZE_0),
         mSizeM1(SIZE_0)
     {
         // work-around: calling assignement instead
         mDims = {SIZE_0};
+        mImpl = Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
+            mDataType, mvActiveAreaOrigin, mDims);
         copyData(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0);
     }
 
@@ -143,7 +152,8 @@ public:
         resize({SIZE_0});
         if (!mImpl)
         {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
+            mImpl = Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
+                mDataType, mvActiveAreaOrigin, mDims);
         }
         copyData(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0);
         return *this;
@@ -158,9 +168,11 @@ public:
     template<typename T, std::size_t SIZE_0, std::size_t SIZE_1>
     constexpr Tensor(Array2D<T, SIZE_0, SIZE_1> &&arr) :
         Data(Type),
-        mDataType(NativeType<T>::type),
+        mDataType(detail::NativeType_v<T>),
+        mvActiveAreaOrigin(2, 0),
         mDims({SIZE_0, SIZE_1}),
-        mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
+        mImpl(Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
+            mDataType, mvActiveAreaOrigin, mDims)),
         mSize(SIZE_0 * SIZE_1),
         mSizeM1(SIZE_1)
     {
@@ -173,7 +185,8 @@ public:
         resize({SIZE_0, SIZE_1});
         if (!mImpl)
         {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
+            mImpl = Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
+                mDataType, mvActiveAreaOrigin, mDims);
         }
         copyData(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0 * SIZE_1);
         return *this;
@@ -189,9 +202,11 @@ public:
     template<typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
     constexpr Tensor(Array3D<T, SIZE_0, SIZE_1, SIZE_2> const &arr) :
         Data(Type),
-        mDataType(NativeType<T>::type),
+        mDataType(detail::NativeType_v<T>),
+        mvActiveAreaOrigin(3, 0),
         mDims({SIZE_0, SIZE_1, SIZE_2}),
-        mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
+        mImpl(Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
+            mDataType, mvActiveAreaOrigin, mDims)),
         mSize(SIZE_0 * SIZE_1 * SIZE_2),
         mSizeM1(SIZE_1 * SIZE_2)
     {
@@ -204,7 +219,8 @@ public:
         resize({SIZE_0, SIZE_1, SIZE_2});
         if (!mImpl)
         {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
+            mImpl = Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
+                mDataType, mvActiveAreaOrigin, mDims);
         }
         copyData(reinterpret_cast<Byte_t const *>(&(arr.data)), SIZE_0 * SIZE_1 * SIZE_2);
         return *this;
@@ -226,9 +242,11 @@ public:
         std::size_t SIZE_3>
     constexpr Tensor(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> const &arr) :
         Data(Type),
-        mDataType(NativeType<T>::type),
+        mDataType(detail::NativeType_v<T>),
+        mvActiveAreaOrigin(4, 0),
         mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
-        mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
+        mImpl(Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
+            mDataType, mvActiveAreaOrigin, mDims)),
         mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3),
         mSizeM1(SIZE_1 * SIZE_2 * SIZE_3)
     {
@@ -248,7 +266,8 @@ public:
         resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
         if (!mImpl)
         {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this);
+            mImpl = Registrar<Tensor>::create({"cpu", detail::NativeType_v<T>})(
+                mDataType, mvActiveAreaOrigin, mDims);
         }
         copyData(
             reinterpret_cast<Byte_t const *>(&(arr.data)),
@@ -396,6 +415,7 @@ public:
      * @brief Change the shape of the Tensor object according to the given argument.
      * @tparam DIM new dimensions.
      * @param dims
+     * @note Current API resets origin coordinates to null values
      */
     template<std::array<DimSize_t, 1>::size_type
                  DIM> // deducing std::array size_type and declaring DIM accordingly
@@ -408,6 +428,11 @@ public:
         resize(mDims);
     }
 
+    /**
+     * @brief Change the shape of the Tensor object according to the given argument.
+     * @param dims
+     * @note Current API resets origin coordinates to null values
+     */
     void resize(const std::vector<DimSize_t> &dims);
 
     /**
diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp
index 638761954..e14a6dced 100644
--- a/include/aidge/utils/TensorUtils.hpp
+++ b/include/aidge/utils/TensorUtils.hpp
@@ -11,8 +11,8 @@
 
 #ifndef AIDGE_CORE_UTILS_TENSOR_UTILS_H_
 #define AIDGE_CORE_UTILS_TENSOR_UTILS_H_
-#include <cmath>  // std::abs
 #include "aidge/data/Tensor.hpp"
+#include <cmath> // std::abs
 
 /**
  * @brief Compare two :cpp:class:`Aidge::Tensor` value wise. The comparison function is:
@@ -22,27 +22,35 @@
  * If a tensor value is different from the other tensor return False
  * If the tensor does not have the same size, return False
  * If the datatype is not the same between each tensor return False
- * If the templated type does not correspond to the datatype of each tensor, raise an assertion error
+ * If the templated type does not correspond to the datatype of each tensor, raise an
+ * assertion error
  *
- * @tparam T should correspond to the type of the tensor, define the type of the absolute and relative error
+ * @tparam T should correspond to the type of the tensor, define the type of the absolute
+ * and relative error
  * @param t1  first :cpp:class:`Aidge::Tensor` to test
  * @param t2  second :cpp:class:`Aidge::Tensor` to test
  * @param relative relative difference allowed (should be betwen 0 and 1)
  * @param absolute absolute error allowed (shoulmd be positive)
- * @return true if both tensor are approximately equal and have the datatype, shape. Else return false
+ * @return true if both tensor are approximately equal and have the datatype, shape. Else
+ * return false
  */
-template <typename T>
-bool approxEq(Aidge::Tensor t1, Aidge::Tensor t2, float relative, float absolute){
+template<typename T>
+bool approxEq(Aidge::Tensor t1, Aidge::Tensor t2, float relative, float absolute)
+{
     assert(t1.dataType() == t2.dataType());
-    assert(t1.dataType() == NativeType<T>::type);
+    assert(t1.dataType() == Aidge::detail::NativeType_v<T>);
     assert(relative >= 0);
-    assert(absolute >= 0 && absolute<=1);
+    assert(absolute >= 0 && absolute <= 1);
 
-    if (t1.size() != t2.size()){
+    if (t1.size() != t2.size())
+    {
         return false;
     }
-    for(size_t i; i < t1.size(); ++i){
-        if (static_cast<float>(std::abs(t1.get<T>(i) - t2.get<T>(i))) > (absolute + (relative * static_cast<float>(std::abs(t2.get<T>(i)))))){
+    for (size_t i = 0; i < t1.size(); ++i)
+    {
+        if (static_cast<float>(std::abs(t1.get<T>(i) - t2.get<T>(i)))
+            > (absolute + (relative * static_cast<float>(std::abs(t2.get<T>(i))))))
+        {
             return false;
         }
     }
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 719a335d0..719b7a57d 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -31,37 +31,38 @@ void addCtor(py::class_<
              Registrable<
                  Tensor,
                  std::tuple<std::string, DataType>,
-                 detail::pimpl::ImplPtr_t(const Tensor&)>>& mTensor)
+                 detail::pimpl::ImplPtr_t(
+                     DataType const i_DataType,
+                     std::vector<Coord_t> const& i_FirstDataCoordinates,
+                     std::vector<DimSize_t> const& i_Dimensions)>>& mTensor)
 {
     mTensor
-        .def(py::init(
-            [](py::array_t<T, py::array::c_style | py::array::forcecast> b)
-            {
-                /* Request a buffer descriptor from Python */
-                py::buffer_info info = b.request();
-                Tensor* newTensor = new Tensor();
-                newTensor->setDatatype(NativeType<T>::type);
-                const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end());
-                newTensor->resize(dims);
+        .def(py::init([](py::array_t<T, py::array::c_style | py::array::forcecast> b) {
+            /* Request a buffer descriptor from Python */
+            py::buffer_info info = b.request();
+            Tensor* newTensor = new Tensor();
+            newTensor->setDatatype(detail::NativeType_v<T>);
+            const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end());
+            newTensor->resize(dims);
 
-                // TODO : Find a better way to choose backend
-                std::set<std::string> availableBackends = Tensor::getAvailableBackends();
-                if (availableBackends.find("cpu") != availableBackends.end())
-                {
-                    newTensor->setBackend("cpu");
-                    newTensor->getImpl().copy(
-                        reinterpret_cast<Byte_t*>(info.ptr), newTensor->size());
-                }
-                else
-                {
-                    printf("Warning : Could not use aidge_cpu backend, verify you have "
-                           "`import aidge_cpu`\n");
-                }
+            // TODO : Find a better way to choose backend
+            std::set<std::string> availableBackends = Tensor::getAvailableBackends();
+            if (availableBackends.find("cpu") != availableBackends.end())
+            {
+                newTensor->setBackend("cpu");
+                newTensor->getImpl().copy(
+                    reinterpret_cast<Byte_t*>(info.ptr), newTensor->size());
+            }
+            else
+            {
+                printf("Warning : Could not use aidge_cpu backend, verify you have "
+                       "`import aidge_cpu`\n");
+            }
 
-                return newTensor;
-            }))
-        .def("__setitem__", (void(Tensor::*)(NbElts_t, T)) & Tensor::set)
-        .def("__setitem__", (void(Tensor::*)(std::vector<Coord_t>, T)) & Tensor::set);
+            return newTensor;
+        }))
+        .def("__setitem__", (void (Tensor::*)(NbElts_t, T)) & Tensor::set)
+        .def("__setitem__", (void (Tensor::*)(std::vector<Coord_t>, T)) & Tensor::set);
 }
 
 void init_Tensor(py::module& m)
@@ -70,11 +71,17 @@ void init_Tensor(py::module& m)
         Registrable<
             Tensor,
             std::tuple<std::string, DataType>,
-            detail::pimpl::ImplPtr_t(const Tensor&)>,
+            detail::pimpl::ImplPtr_t(
+                DataType const i_DataType,
+                std::vector<Coord_t> const& i_FirstDataCoordinates,
+                std::vector<DimSize_t> const& i_Dimensions)>,
         std::shared_ptr<Registrable<
             Tensor,
             std::tuple<std::string, DataType>,
-            detail::pimpl::ImplPtr_t(const Tensor&)>>>(m, "TensorRegistrable");
+            detail::pimpl::ImplPtr_t(
+                DataType const i_DataType,
+                std::vector<Coord_t> const& i_FirstDataCoordinates,
+                std::vector<DimSize_t> const& i_Dimensions)>>>(m, "TensorRegistrable");
 
     py::class_<
         Tensor,
@@ -83,7 +90,10 @@ void init_Tensor(py::module& m)
         Registrable<
             Tensor,
             std::tuple<std::string, DataType>,
-            detail::pimpl::ImplPtr_t(const Tensor&)>>
+            detail::pimpl::ImplPtr_t(
+                DataType const i_DataType,
+                std::vector<Coord_t> const& i_FirstDataCoordinates,
+                std::vector<DimSize_t> const& i_Dimensions)>>
         pyClassTensor(m, "Tensor", py::multiple_inheritance(), py::buffer_protocol());
 
     pyClassTensor.def(py::init<>())
@@ -91,12 +101,11 @@ void init_Tensor(py::module& m)
         .def("dims", (const std::vector<DimSize_t>& (Tensor::*)() const) & Tensor::dims)
         .def("dtype", &Tensor::dataType)
         .def("size", &Tensor::size)
-        .def("resize", (void(Tensor::*)(const std::vector<DimSize_t>&)) & Tensor::resize)
+        .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&)) & Tensor::resize)
         .def("has_impl", &Tensor::hasImpl)
         .def(
             "get_coord",
-            [](Tensor& b, NbElts_t flatIdx) -> std::vector<Coord_t>
-            {
+            [](Tensor& b, NbElts_t flatIdx) -> std::vector<Coord_t> {
                 std::vector<Coord_t> Coords(b.dims().size());
                 /// @todo could be removed soon
                 assert(
@@ -118,8 +127,7 @@ void init_Tensor(py::module& m)
         .def("__len__", [](Tensor& b) -> NbElts_t { return b.size(); })
         .def(
             "__getitem__",
-            [](Tensor& b, NbElts_t idx) -> py::object
-            {
+            [](Tensor& b, NbElts_t idx) -> py::object {
                 if (idx >= b.size())
                     throw py::index_error();
                 switch (b.dataType())
@@ -136,8 +144,7 @@ void init_Tensor(py::module& m)
             })
         .def(
             "__getitem__",
-            [](Tensor& b, std::vector<Coord_t> coordIdx) -> py::object
-            {
+            [](Tensor& b, std::vector<Coord_t> coordIdx) -> py::object {
                 if (b.getIdx(coordIdx) >= b.size())
                     throw py::index_error();
                 switch (b.dataType())
@@ -152,52 +159,50 @@ void init_Tensor(py::module& m)
                         return py::none();
                 }
             })
-        .def_buffer(
-            [](Tensor& b) -> py::buffer_info
-            {
-                /// @todo const TensorImpl, yet a pointer to writable data is requested
-                /// later: unconsistent?
-                const TensorImpl& tensorImpl = b.getImpl();
+        .def_buffer([](Tensor& b) -> py::buffer_info {
+            /// @todo const TensorImpl, yet a pointer to writable data is requested
+            /// later: inconsistent?
+            const TensorImpl& tensorImpl = b.getImpl();
 
-                std::vector<DimSize_t> dims;
-                std::vector<NbElts_t> strides;
-                NbElts_t stride = tensorImpl.scalarSize();
+            std::vector<DimSize_t> dims;
+            std::vector<NbElts_t> strides;
+            NbElts_t stride = tensorImpl.scalarSize();
 
-                for (unsigned int dim = b.nbDims(); dim > 0; dim--)
-                {
-                    dims.push_back(b.dims()[dim - 1]);
-                    strides.push_back(stride);
-                    stride *= b.dims()[dim - 1];
-                }
-                std::reverse(dims.begin(), dims.end());
-                std::reverse(strides.begin(), strides.end());
+            for (unsigned int dim = b.nbDims(); dim > 0; dim--)
+            {
+                dims.push_back(b.dims()[dim - 1]);
+                strides.push_back(stride);
+                stride *= b.dims()[dim - 1];
+            }
+            std::reverse(dims.begin(), dims.end());
+            std::reverse(strides.begin(), strides.end());
 
-                std::string dataFormatDescriptor;
-                switch (b.dataType())
-                {
-                    case DataType::Float64:
-                        dataFormatDescriptor = py::format_descriptor<double>::format();
-                        break;
-                    case DataType::Float32:
-                        dataFormatDescriptor = py::format_descriptor<float>::format();
-                        break;
-                    case DataType::Int32:
-                        dataFormatDescriptor = py::format_descriptor<int>::format();
-                        break;
-                    default:
-                        throw py::value_error("Unsupported data format");
-                }
+            std::string dataFormatDescriptor;
+            switch (b.dataType())
+            {
+                case DataType::Float64:
+                    dataFormatDescriptor = py::format_descriptor<double>::format();
+                    break;
+                case DataType::Float32:
+                    dataFormatDescriptor = py::format_descriptor<float>::format();
+                    break;
+                case DataType::Int32:
+                    dataFormatDescriptor = py::format_descriptor<int>::format();
+                    break;
+                default:
+                    throw py::value_error("Unsupported data format");
+            }
 
-                return py::buffer_info(
-                    const_cast<void*>(reinterpret_cast<void const*>(
-                        tensorImpl.getDataAddress())), /* Pointer to buffer */
-                    tensorImpl.scalarSize(), /* Size of one scalar */
-                    dataFormatDescriptor, /* Python struct-style format descriptor */
-                    b.nbDims(), /* Number of dimensions */
-                    dims, /* Buffer dimensions */
-                    strides /* Strides (in bytes) for each index */
-                );
-            });
+            return py::buffer_info(
+                const_cast<void*>(reinterpret_cast<void const*>(
+                    tensorImpl.getDataAddress())), /* Pointer to buffer */
+                tensorImpl.scalarSize(), /* Size of one scalar */
+                dataFormatDescriptor, /* Python struct-style format descriptor */
+                b.nbDims(), /* Number of dimensions */
+                dims, /* Buffer dimensions */
+                strides /* Strides (in bytes) for each index */
+            );
+        });
 
     // TODO : If the ctor with the right data type does not exist, pybind will always
     // convert the data to INT ! Need to find a way to avoid this !
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index f44df8836..a59fe2116 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#include "aidge/data/Tensor.hpp"
 #include "aidge/backend/TensorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
 
 namespace Aidge
 {
@@ -42,8 +42,8 @@ void Tensor::setBackend(const std::string &name)
         {
             // Backend change: create new impl, copy from old to new and replace
             // impl
-            detail::pimpl::ImplPtr_t newImpl
-                = Registrar<Tensor>::create({name, mDataType})(*this);
+            detail::pimpl::ImplPtr_t newImpl = Registrar<Tensor>::create(
+                {name, mDataType})(mDataType, mvActiveAreaOrigin, mDims);
             if (hasData())
             {
                 assert(false && "So far copy between different backend is not supported");
@@ -57,7 +57,8 @@ void Tensor::setBackend(const std::string &name)
     }
     else
     {
-        mImpl = Registrar<Tensor>::create({name, mDataType})(*this);
+        mImpl = Registrar<Tensor>::create({name, mDataType})(
+            mDataType, mvActiveAreaOrigin, mDims);
     }
 }
 
@@ -123,8 +124,8 @@ void Tensor::setDatatype(const DataType dt)
     if (hasImpl() && (dataType() != dt))
     {
         mDataType = dt;
-        detail::pimpl::ImplPtr_t newImpl
-            = Registrar<Tensor>::create({mImpl->backend(), dt})(*this);
+        detail::pimpl::ImplPtr_t newImpl = Registrar<Tensor>::create(
+            {mImpl->backend(), dt})(mDataType, mvActiveAreaOrigin, mDims);
         if (hasData())
         {
             assert(
@@ -171,6 +172,7 @@ Tensor::Tensor(const Tensor &otherTensor) :
 void Tensor::resize(const std::vector<DimSize_t> &dims)
 {
     mDims = dims;
+    mvActiveAreaOrigin.assign(dims.size(), 0);
     computeSize();
     if (hasImpl())
     {
-- 
GitLab