Skip to content
Snippets Groups Projects

Refactoring Tensor

Closed laurent soulier requested to merge fix/TensorPIMPL into master
Compare and
20 files
+ 1540
1003
Compare changes
  • Side-by-side
  • Inline
Files
20
#ifndef AIDGE_CPU_DATA_TENSORIMPL_H_
#define AIDGE_CPU_DATA_TENSORIMPL_H_
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <memory>
#include <vector>

#include "aidge/backend/TensorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge {
/// @brief CPU backend storage for Tensor, backed by a std::vector<T>.
/// @details The buffer is lazily allocated on first rawPtr() access.
template <class T>
class TensorImpl_cpu : public TensorImpl {
private:
    // Tensor this implementation stores data for. The impl may read its
    // size/dataType but is not supposed to change it.
    const Tensor &mTensor;
    // Owned, contiguous element storage; lazily sized (see lazyInit()).
    std::vector<T> mData;

public:
    /// Registry key identifying this backend.
    static constexpr const char *Backend = "cpu";

    TensorImpl_cpu(const Tensor &tensor) : TensorImpl(Backend), mTensor(tensor) {}

    /// @brief Element-wise equality with another cpu implementation.
    /// @param otherImpl assumed (unchecked) to be a TensorImpl_cpu<T>;
    ///        comparing across backends/element types is undefined behavior.
    /// @return true when both buffers hold the same number of elements and
    ///         every element compares equal.
    bool operator==(const TensorImpl &otherImpl) const override final {
        const auto &other = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
        // Fix: the previous version iterated mTensor.size() elements, reading
        // past the end of mData whenever the buffer had not been lazily
        // allocated yet (or the other impl's buffer was shorter).
        return mData.size() == other.mData.size() &&
               std::equal(mData.cbegin(), mData.cend(), other.mData.cbegin());
    }

    /// @brief Factory used by the Registrar entries below.
    static std::unique_ptr<TensorImpl_cpu> create(const Tensor &tensor) {
        return std::make_unique<TensorImpl_cpu<T>>(tensor);
    }

    // native interface
    /// @brief Direct, typed read access to the underlying buffer.
    const std::vector<T> &data() const { return mData; }

    /// @brief Size in bytes of one element.
    std::size_t scalarSize() const override { return sizeof(T); }

    /// @brief Copies `length` elements of type T from src into this buffer,
    /// allocating the buffer first if needed.
    void copy(const void *src, NbElts_t length) override {
        const T *first = static_cast<const T *>(src);
        std::copy(first, first + length, static_cast<T *>(rawPtr()));
    }

    /// @brief Returns the storage address, allocating the buffer on first use.
    void *rawPtr() override {
        lazyInit(mData);
        return mData.data();
    }

    virtual ~TensorImpl_cpu() = default;

    /// @brief Replaces the buffer content by copying mTensor.size() elements
    /// from ptr. Ownership of ptr is NOT taken; its content is copied.
    void setRawPtr(void *ptr) override final {
        T *newPtr = static_cast<T *>(ptr);
        // assign() reuses existing capacity instead of building a fresh vector.
        mData.assign(newPtr, newPtr + mTensor.size());
    }

private:
    /// @brief Ensures the buffer matches the tensor's element count.
    void lazyInit(std::vector<T> &data) {
        assert(mTensor.dataType() == NativeType<T>::type);
        if (data.size() != mTensor.size()) {
            data.resize(mTensor.size());
        }
    }
};
namespace {
// Register the cpu implementation for each supported data type so that
// Tensor can instantiate the right TensorImpl_cpu<T> at runtime.
// NOTE(review): static registrars in an anonymous namespace inside a HEADER
// create one registration per translation unit that includes this file —
// confirm Registrar tolerates duplicate {"cpu", DataType} registrations.
static Registrar<Tensor> registrarTensorImpl_cpu_Float64(
{"cpu", DataType::Float64}, Aidge::TensorImpl_cpu<double>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Float32(
{"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
{"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_DATA_TENSORIMPL_H_ */
#ifndef AIDGE_CPU_DATA_TENSORIMPL_H_
#define AIDGE_CPU_DATA_TENSORIMPL_H_
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
namespace Aidge
{
/// @brief CPU backend storage for Tensor, backed by a std::vector<T>.
/// @details The buffer is lazily allocated on first rawPtr() access; layout
/// metadata is pushed down to the TensorImpl base at that point.
template<class T> class TensorImpl_cpu final : public TensorImpl
{
private:
/// Owned, contiguous element storage; sized lazily in lazyInit().
std::vector<T> mData;
/// Registry key identifying this backend.
static constexpr const char *Backend = "cpu";
public:
TensorImpl_cpu(const Tensor &tensor) : TensorImpl(Backend, tensor)
{
}
/// @brief Element-wise equality with another implementation.
/// @param otherImpl assumed (unchecked) to be a TensorImpl_cpu<T>.
/// @return true if contained data are the same, false if tensor have different
/// characteristics or if any value is different.
/// @bug Characteristics comparison is not implemented
/// @bug Comparison may not be pertinent for floating point types.
bool operator==(const TensorImpl &otherImpl) const override final
{
std::size_t i = 0;
// Walk until first mismatch or end. NOTE(review): this indexes mData up to
// GetTensor().size(); if the buffer has not been lazily allocated yet
// (mData empty or shorter), this reads out of bounds — confirm callers
// always hit rawPtr() before comparing.
for (; i < GetTensor().size()
&& mData[i]
== reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl).data()[i];
++i)
{
}
return i == GetTensor().size();
}
/// @brief Factory used by the Registrar entries below.
/// @bug Is it defined behavior to initialize a smart-pointer to base from new derived
/// @todo Check and fix implementation
static detail::pimpl::ImplPtr_t create(const Tensor &tensor)
{
return detail::pimpl::ImplPtr_t(new TensorImpl_cpu<T>(tensor));
}
// native interface
/// @brief Direct, typed read access to the underlying buffer (cpu-only API).
const std::vector<T> &data() const
{
return mData;
}
/// @brief Copies `length` items from src into this buffer, allocating it
/// first if needed. NOTE(review): length is treated here as a count of T
/// elements, not bytes — confirm against the base-class contract.
/// @bug Copy implementation, between different data types even on same backend is
/// plain wrong: undefined behaviour in violation of the strict-aliasing rule
void copy(const Byte_t *src, NbElts_t length) override
{
std::copy(
reinterpret_cast<const T *>(src),
reinterpret_cast<const T *>(src) + length,
reinterpret_cast<T *>(rawPtr()));
}
/// @brief Returns the storage address, allocating the buffer and syncing the
/// base-class layout properties on first use (see lazyInit()).
Byte_t *rawPtr() override
{
lazyInit();
return reinterpret_cast<Byte_t *>(mData.data());
};
// Byte_t const *rawPtr() const override
// {
// return mData.data();
// };
~TensorImpl_cpu() override = default;
/// @deprecated
/// @brief Replaces the buffer content by copying GetTensor().size() elements
/// from ptr. Ownership of ptr is NOT taken; its content is copied.
void setRawPtr(unsigned char *ptr) override final
{
T *newPtr = reinterpret_cast<T *>(ptr);
mData = std::vector<T>(newPtr, newPtr + GetTensor().size());
SetDataAddress(reinterpret_cast<Byte_t *>(mData.data()));
};
private:
/// @brief Allocates a buffer without padding for copying a third-party data
/// @details Also mirrors size/layout metadata into the TensorImpl base.
/// NOTE(review): ComputeLayout() presumably derives the layout from the
/// dimensions/coordinates set just above — keep the call order intact.
void lazyInit()
{
assert(GetTensor().dataType() == NativeType<T>::type);
if (mData.size() != GetTensor().size())
{
mData.resize(GetTensor().size());
}
// update properties
SetScalarSize(sizeof(T));
SetNbElts(GetTensor().size());
SetDimensions(GetTensor().dims());
SetFirstCoordinates(std::vector<Coord_t>(GetTensor().dims().size(), 0));
ComputeLayout();
// Store storage address (resize above may have reallocated)
SetDataAddress(reinterpret_cast<Byte_t *>(mData.data()));
}
};
namespace
{
// Register the cpu implementation for each supported data type so that
// Tensor can instantiate the right TensorImpl_cpu<T> at runtime.
// NOTE(review): static registrars in an anonymous namespace inside a HEADER
// create one registration per translation unit that includes this file —
// confirm Registrar tolerates duplicate {"cpu", DataType} registrations.
static Registrar<Tensor> registrarTensorImpl_cpu_Float64(
{"cpu", DataType::Float64}, Aidge::TensorImpl_cpu<double>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Float32(
{"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
{"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_DATA_TENSORIMPL_H_ */
Loading