diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 96077573c4561d765731ddebdd45626e13291c6f..cca09afdd9e4a2fa694f405085264a6d332884a9 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -40,8 +40,7 @@ class Tensor : public Data,
     std::shared_ptr<Tensor> mGrad; /** Pointer to the associated gradient Tensor instance. */
 
     // Cached data
-    std::size_t mSize; /** Number of elements in the Tensor. */
-    std::size_t mSizeM1; /** Number of elements in the N-1 first dimensions */
+    std::size_t mSize = 0; /** Number of elements in the Tensor. */
 
    public:
     static constexpr const char *Type = "Tensor";
@@ -52,10 +51,7 @@
      */
     Tensor(DataType dataType = DataType::Float32)
         : Data(Type),
-          mDataType(dataType),
-          mDims({}),
-          mSize(0),
-          mSizeM1(0)
+          mDataType(dataType)
     {
         // ctor
     }
@@ -68,8 +64,7 @@
         : Data(Type),
           mDataType(otherTensor.mDataType),
           mDims(otherTensor.mDims),
-          mSize(otherTensor.mSize),
-          mSizeM1(otherTensor.mSizeM1)
+          mSize(otherTensor.mSize)
     {
         if (otherTensor.hasImpl()) {
             mImpl = Registrar<Tensor>::create({otherTensor.mImpl->backend(), dataType()})(*this);
@@ -90,8 +85,7 @@
           mDataType(NativeType<T>::type),
           mDims({SIZE_0}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
-          mSize(SIZE_0),
-          mSizeM1(SIZE_0) {
+          mSize(SIZE_0) {
         mImpl->copyFromHost(&arr.data[0], SIZE_0);
     }
 
@@ -117,8 +111,7 @@
           mDataType(NativeType<T>::type),
           mDims({SIZE_0, SIZE_1}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
-          mSize(SIZE_0 * SIZE_1),
-          mSizeM1(SIZE_1) {
+          mSize(SIZE_0 * SIZE_1) {
         mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
     }
 
@@ -145,8 +138,7 @@
           mDataType(NativeType<T>::type),
           mDims({SIZE_0, SIZE_1, SIZE_2}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
-          mSize(SIZE_0 * SIZE_1 * SIZE_2),
-          mSizeM1(SIZE_1 * SIZE_2) {
+          mSize(SIZE_0 * SIZE_1 * SIZE_2) {
         mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
     }
 
@@ -174,8 +166,7 @@
           mDataType(NativeType<T>::type),
           mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)),
-          mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3),
-          mSizeM1(SIZE_1 * SIZE_2 * SIZE_3) {
+          mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3) {
         mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
     }
 
@@ -334,12 +325,6 @@
      */
     constexpr std::size_t size() const { return mSize; }
 
-    /**
-     * @brief Get the number of elements in the N-1 dimensions of the Tensor object.
-     * @return constexpr std::size_t
-     */
-    constexpr std::size_t sizeM1() const { return mSizeM1; }
-
     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
      * If the overall size is not changed (meaning we actually only performed a
@@ -673,17 +658,10 @@ private:
     ///\bug not protected against overflow
     std::size_t computeSize() {
         if (mDims.empty()) {
-            mSizeM1 = DimSize_t(0);
             mSize = DimSize_t(0);
         }
-        else if (mDims.size() == 1)
-        {
-            mSizeM1 = mDims[0];
-            mSize = mDims[0];
-        }
         else {
-            mSizeM1 = std::accumulate(++mDims.begin(),mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
-            mSize = static_cast<std::size_t>(mSizeM1 * mDims[0]);
+            mSize = std::accumulate(mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
        }
 
        return mSize;
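
For reference, below is a minimal standalone sketch (not part of the patch) of what the simplified size computation does, and of how former `sizeM1()` call sites could derive the same value on demand. It assumes `DimSize_t` is `std::size_t` and uses a free function over a plain `std::vector` in place of the private member `computeSize()`; the hypothetical `main()` is only for illustration.

```cpp
// Standalone sketch, not part of the patch: mirrors the simplified
// computeSize() logic (assuming DimSize_t == std::size_t).
#include <cstddef>
#include <functional>
#include <iostream>
#include <iterator>
#include <numeric>
#include <vector>

using DimSize_t = std::size_t;

// Product of all dimensions; an empty shape yields 0.
// Like the original, this is not protected against overflow.
DimSize_t computeSize(const std::vector<DimSize_t>& dims) {
    return dims.empty()
               ? DimSize_t(0)
               : std::accumulate(dims.begin(), dims.end(), DimSize_t(1),
                                 std::multiplies<DimSize_t>());
}

int main() {
    const std::vector<DimSize_t> dims{2, 3, 4};
    const DimSize_t size = computeSize(dims);  // 2 * 3 * 4 = 24

    // What the removed sizeM1() used to cache: for rank >= 2, the product of
    // all dimensions except the first; the old code special-cased rank 1 to
    // return mDims[0], and rank 0 to return 0.
    const DimSize_t sizeM1 =
        dims.size() < 2
            ? (dims.empty() ? DimSize_t(0) : dims[0])
            : std::accumulate(std::next(dims.begin()), dims.end(), DimSize_t(1),
                              std::multiplies<DimSize_t>());  // 3 * 4 = 12

    std::cout << "size=" << size << " sizeM1=" << sizeM1 << '\n';
    return 0;
}
```

Dropping the cached `mSizeM1` removes a second cache that had to be kept in sync and the rank-1 special case in `computeSize()`; callers that still need the value can recompute it from `dims()` as sketched above.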