Commit f757aa48 authored by laurent soulier

[fix/FlatId][FIX][DSGN] Redesigning flat index usage in Tensor API, previous implementation was inconsistent
fix #58
fix #60
parent 312b3fa6
2 merge requests: !41 Support for any backend storage, !13 Refactoring Tensor
Pipeline #35423 failed
@@ -173,6 +173,7 @@ public:
/// is the tensor dimension.
/// @todo make it pure virtual, temporary default implementation provided for
/// testing and debugging
/// @todo May be deprecated as it is not used anywhere anymore
inline std::vector<std::size_t> const &getMemoryLayout() const noexcept
{
return mvLayout;
@@ -190,50 +191,23 @@ public:
return mDataType;
};
/// @brief Get the logical coordinates of the data at given index
/// @param flatIdx 1D index of the value considering a flattened tensor.
/// @note The index is expressed in number of elements
/// @return Logical coordinates of the data at given index
/// @note return by value is efficient here as it relies on vector move semantics.
std::vector<Coord_t> getCoord(NbElts_t const flatIdx) const noexcept
{
std::vector<Coord_t> coordIdx(mvDimensions.size());
std::size_t In = flatIdx;
// trick for reverse loop without index underflow
// when i is 1, (i--)>0 is true but i contains 0 on output
// when i is 0, (i--)>0 is false leaving the loop, i has underflow but its
// value is not used anymore
/// @todo implement it with synchronized reverse iterators
for (std::size_t i = mvDimensions.size(); (i--) > 0;)
{
// computes coordinates in 0-based coordinates
coordIdx[i] = In % mvDimensions[i];
In = (In - coordIdx[i]) / mvDimensions[i];
// shift with respect to first data coordinates
coordIdx[i] = coordIdx[i] + mvFirstDataCoordinates[i];
}
return coordIdx;
};
/// @brief Get the linear index of the first Byte_t of the data at given
/// coordinates
/// @brief Get the linear index of the data at given coordinates
/// @param coordIdx coordinates of the desired data
/// @note The index is expressed in number of elements
/// @return Linear index of the first Byte_t of the data at given coordinates
NbElts_t getIdx(std::vector<Coord_t> const &coordIdx) const noexcept
/// @return Linear index of the data at given coordinates
NbElts_t getImplIdx(std::vector<Coord_t> const &coordIdx) const noexcept
{
std::size_t flatIdx(0);
for (std::size_t i = 0; i < mvDimensions.size(); ++i)
for (std::size_t i = 0; i < mvDimensions.size()-1; ++i)
{
assert(
((coordIdx[i] - mvFirstDataCoordinates[i]) < mvDimensions[i])
&& (coordIdx[i] >= mvFirstDataCoordinates[i])
&& "Coordinates dimensions does not fit the dimensions of the "
"tensor");
flatIdx += ((coordIdx[i] - mvFirstDataCoordinates[i]) * mvLayout[i]);
flatIdx = (flatIdx+(coordIdx[i] - mvFirstDataCoordinates[i])) * mvDimensions[i+1];
}
return flatIdx / mScalarSize;
return flatIdx+(coordIdx[mvDimensions.size()-1] - mvFirstDataCoordinates[mvDimensions.size()-1]);
}
/// @brief Change TensorImpl dimensions
@@ -278,6 +252,7 @@ private:
/// mDimensions[D-1], then mvLayout[D-1] is mScalarSize and
/// mvLayout[i] == mvLayout[i+1]*mDimensions[i+1];<br>
/// Besides mvLayout[0]*mDimensions[0] == mNbElts*mvLayout[D-1].
/// @todo May be deprecated as it is not used anywhere anymore
std::vector<std::size_t> mvLayout;
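// Editorial illustration (not part of the commit): for a hypothetical 2x3x4
// tensor of 4-byte scalars, the invariants above give mvLayout == {48, 16, 4}:
// mvLayout[2] == mScalarSize == 4, mvLayout[1] == 4*4 == 16,
// mvLayout[0] == 16*3 == 48, and indeed
// mvLayout[0]*mDimensions[0] == 96 == mNbElts*mvLayout[2] == 24*4.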
/// @brief Address of the very first data in memory (lexicographic order)
/// @details Points to a contiguous in-memory array such that, when cast to
@@ -366,6 +341,7 @@ protected:
/// @brief Computes layout from Tensor total dimensions and size of a single data.
/// @note Assumes that the Tensor dimensions and data type are valid.
/// @note Should be used only during (re)initialization of storage.
/// @todo May be deprecated as it is not used anywhere anymore
void computeLayout()
{
std::size_t NbDims = mvDimensions.size();
@@ -416,6 +392,7 @@ protected:
clonePropertiesExceptArea(i_TensorImpl);
mvDimensions = i_TensorImpl.mvDimensions;
mvFirstDataCoordinates = i_TensorImpl.mvFirstDataCoordinates;
/// @todo May be deprecated as it is not used anywhere anymore
mvLayout = i_TensorImpl.mvLayout;
}
/// @brief Release internal storage
......
@@ -463,45 +463,66 @@ public:
return mDims.empty();
}
/// @brief Get element by its index
/// @brief Get element by its index within the active area
/// @tparam expectedType expected type of the stored element
/// @param idx offset in number of elements from the first stored element
/// @param idx index, in number of elements and lexicographic order, from the first data within the active area
/// @return read-write reference to the element
/// @bug this function is plain wrong and can be used only on cpu backend
/// @todo redesign
/// @todo change API idx to become a logical index inside active area only
template<typename expectedType> expectedType &get(NbElts_t const idx) noexcept
{
///@todo : add assert expected Type compatible with datatype
///@todo : add assert idx < Size
// converts logical flat id to storage flat id
return *reinterpret_cast<expectedType *>(
getDataAddress() + idx * getScalarSize());
getDataAddress() + mImpl->getImplIdx(getCoord(idx)) * getScalarSize());
}
/// @brief Get element by its logical coordinates
/// @tparam expectedType expected type of the stored element
/// @param coordIdx coordinates of the desired data
/// @return read-write reference to the data
/// @bug this function is plain wrong and can be used only on cpu backend
/// @todo redesign
/// @todo change API idx to become a logical index inside active area only
template<typename expectedType>
expectedType &get(std::vector<Coord_t> const &coordIdx)
{
return get<expectedType>(getIdx(coordIdx));
///@todo : add assert expected Type compatible with datatype
///@todo : add assert coordIdx inside active area
return *reinterpret_cast<expectedType *>(
getDataAddress() + mImpl->getImplIdx(coordIdx) * getScalarSize());
}
/// @brief Set the value of an element identified by its index
/// @brief Set the value of an element identified by its index within active area
/// @tparam expectedType expected type of the stored element
/// @param idx offset in number of elements from the first stored element
/// @return read-write reference to the element
/// @param idx index, in number of elements and lexicographic order, from the first data within the active area
/// @param value value to set at given index
/// @bug this function is plain wrong and can be used only on cpu backend
/// @todo redesign
template<typename expectedType> void set(NbElts_t const idx, expectedType const value)
{
///@todo : add assert expected Type compatible with datatype
///@todo : add assert idx < Size
unsigned char *dataPtr = getDataAddress() + idx * getScalarSize();
// converts logical flat id to storage flat id
unsigned char *dataPtr = getDataAddress() + mImpl->getImplIdx(getCoord(idx)) * getScalarSize();
///@bug only valid for trivially copyable data
std::memcpy(dataPtr, &value, getScalarSize());
}
/// @brief Set the value of a data identified by its logical coordinates
/// @tparam expectedType expected type of the stored data
/// @param coordIdx logical coordinates of the data to set
/// @param value value to set at given coordinates
template<typename expectedType>
void set(std::vector<Coord_t> coordIdx, expectedType value)
{
set<expectedType>(getIdx(coordIdx), value);
///@todo : add assert expected Type compatible with datatype
///@todo : add assert coordIdx inside active area
unsigned char *dataPtr = getDataAddress() + mImpl->getImplIdx(coordIdx) * getScalarSize();
///@bug only valid for trivially copyable data
std::memcpy(dataPtr, &value, getScalarSize());
}
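// Editorial usage sketch (assumed shape and values, not from the commit): for a
// hypothetical 2x3x4 float tensor t with active-area origin {0, 0, 0}, the
// flat-index and coordinate overloads are expected to address the same element:
//   t.set<float>(std::vector<Coord_t>{1, 2, 3}, 42.f);
//   assert(t.get<float>(23) == 42.f);        // 23 == (1*3 + 2)*4 + 3
//   assert(t.get<float>({1, 2, 3}) == 42.f); // coordinate overload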
std::string toString() const;
@@ -517,7 +538,7 @@ public:
* @brief From the 1D index, return the coordinate of an element in the tensor.
*
* @param flatIdx 1D index of the value considering a flattened tensor.
* @return std::vector<DimSize_t>
* @return std::vector<Coord_t>
* @note return by value is efficient here as it relies on vector move semantics.
*/
std::vector<Coord_t> getCoord(NbElts_t const flatIdx) const noexcept;
@@ -531,6 +552,7 @@ public:
NbElts_t getIdx(std::vector<Coord_t> const &coordIdx) const noexcept;
private:
/// @brief Getting the address of the very first data in memory (lexicographic
/// order), read-only access to data
/// @details Points to a contiguous in-memory array such that, when cast to
......
@@ -11,6 +11,8 @@
#include "aidge/data/Tensor.hpp"
#include <cassert>
namespace Aidge
{
/// @brief Assess whether data type, dimensions, backend and data are the same.
@@ -64,12 +66,38 @@ void Tensor::copyFromHost(Byte_t const *const srcPtr, std::size_t const Bytes)
std::vector<Coord_t> Tensor::getCoord(NbElts_t const flatIdx) const noexcept
{
return mImpl->getCoord(flatIdx);
}
std::vector<Coord_t> coordIdx(mDims.size());
std::size_t Index = flatIdx;
// trick for reverse loop without index underflow
// when i is 1, (i--)>0 is true but i contains 0 on output
// when i is 0, (i--)>0 is false leaving the loop, i has underflow but its
// value is not used anymore
/// @todo implement it with synchronized reverse iterators
for (std::size_t i = mDims.size(); (i--) > 0;)
{
// computes coordinates in 0-based coordinates
coordIdx[i] = Index % mDims[i];
Index = (Index - coordIdx[i]) / mDims[i];
// shift with respect to first data coordinates
coordIdx[i] = coordIdx[i] + mvActiveAreaOrigin[i];
}
return coordIdx;
};
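// Editorial trace (assumed shape, not from the commit): with mDims == {2, 3, 4}
// and mvActiveAreaOrigin == {0, 0, 0}, getCoord(23) unrolls as
//   i == 2: 23 % 4 == 3, Index becomes (23 - 3) / 4 == 5
//   i == 1:  5 % 3 == 2, Index becomes  (5 - 2) / 3 == 1
//   i == 0:  1 % 2 == 1, Index becomes 0
// so getCoord(23) == {1, 2, 3}; getIdx below performs the inverse mapping.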
NbElts_t Tensor::getIdx(std::vector<Coord_t> const &coordIdx) const noexcept
{
return mImpl->getIdx(coordIdx);
std::size_t flatIdx(0);
for (std::size_t i = 0; i < mDims.size()-1; ++i)
{
assert(
((coordIdx[i] - mvActiveAreaOrigin[i]) < mDims[i])
&& (coordIdx[i] >= mvActiveAreaOrigin[i])
&& "Coordinates dimensions does not fit the dimensions of the "
"tensor");
flatIdx = (flatIdx+(coordIdx[i] - mvActiveAreaOrigin[i])) * mDims[i+1];
}
return flatIdx+(coordIdx[mDims.size()-1] - mvActiveAreaOrigin[mDims.size()-1]);
}
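// Editorial illustration (not part of the commit): a self-contained sketch of
// the same Horner-scheme flattening, using hypothetical names (flattenIndex,
// dims, origin, coords). It makes explicit that the fold above equals the usual
// sum of origin-shifted coordinates multiplied by row-major strides.
#include <cstddef>
#include <vector>

std::size_t flattenIndex(
    std::vector<std::size_t> const &dims,
    std::vector<std::size_t> const &origin,
    std::vector<std::size_t> const &coords)
{
    std::size_t idx = 0;
    // ((c0*d1 + c1)*d2 + c2) == c0*(d1*d2) + c1*d2 + c2 for a 3D shape
    for (std::size_t i = 0; i + 1 < dims.size(); ++i)
        idx = (idx + (coords[i] - origin[i])) * dims[i + 1];
    return idx + (coords.back() - origin.back());
}
// e.g. flattenIndex({2, 3, 4}, {0, 0, 0}, {1, 2, 3}) == (1*3 + 2)*4 + 3 == 23,
// the inverse of the getCoord(23) trace above.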
/// @brief Getting the address of the very first data in memory (lexicographic
......
@@ -110,6 +110,8 @@ void Aidge::fuseBatchNorm(
}
}
/// @todo FIXME
/// The following get/set calls are possibly wrong, as the in-memory index might differ from the logical index
// TODO : check if noBias==true is set, then set biasValue to 0
float biasValue = bias->get<float>(outChId);
......