Commit 5a68b2db authored by Maxence Naud

Merge branch 'user/cguillon/dev/py-tensor' into 'dev'

[Tensor]  Rework bindings for Tensor constructors

See merge request !172
parents 1c21952e 748cdabb
Pipeline #52322 passed
"""
Copyright (c) 2023 CEA-List
This program and the accompanying materials are made available under the
terms of the Eclipse Public License 2.0 which is available at
http://www.eclipse.org/legal/epl-2.0.
SPDX-License-Identifier: EPL-2.0
"""
import unittest
import numpy as np
import aidge_core
class test_tensor_scalar(unittest.TestCase):
    """Test tensor binding for scalar (0-rank) tensors
    """
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def _scalar_np_array(self, dtype=None):
        return np.array(1, dtype=dtype)

    def _scalar_np(self, dtype=None):
        return np.int32(1).astype(dtype)

    def test_np_array_int_to_tensor(self):
        np_array = self._scalar_np_array(dtype="int8")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.int8)
        np_array = self._scalar_np_array(dtype="int16")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.int16)
        np_array = self._scalar_np_array(dtype="int32")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.int32)
        np_array = self._scalar_np_array(dtype="int64")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.int64)

    def test_np_array_uint_to_tensor(self):
        np_array = self._scalar_np_array(dtype="uint8")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.uint8)
        np_array = self._scalar_np_array(dtype="uint16")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.uint16)
        np_array = self._scalar_np_array(dtype="uint32")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.uint32)
        np_array = self._scalar_np_array(dtype="uint64")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.uint64)

    def test_np_scalar_int_to_tensor(self):
        np_array = self._scalar_np(dtype="int8")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.int8)
        np_array = self._scalar_np(dtype="int16")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.int16)
        np_array = self._scalar_np(dtype="int32")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.int32)
        np_array = self._scalar_np(dtype="int64")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.int64)

    def test_np_scalar_uint_to_tensor(self):
        np_array = self._scalar_np(dtype="uint8")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.uint8)
        np_array = self._scalar_np(dtype="uint16")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.uint16)
        np_array = self._scalar_np(dtype="uint32")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.uint32)
        np_array = self._scalar_np(dtype="uint64")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.uint64)

    def test_np_array_float_to_tensor(self):
        np_array = self._scalar_np_array(dtype="float32")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.float32)
        np_array = self._scalar_np_array(dtype="float64")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.float64)

    def test_np_scalar_float_to_tensor(self):
        np_array = self._scalar_np(dtype="float32")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.float32)
        np_array = self._scalar_np(dtype="float64")
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.dtype.float64)

    def test_getcoord_getidx_scalar(self):
        np_array = self._scalar_np_array()
        t = aidge_core.Tensor(np_array)
        coord = t.get_coord(0)
        self.assertEqual(tuple(coord), ())
        idx = t.get_idx(coord)
        self.assertEqual(idx, 0)

    def test_indexing_scalar(self):
        np_array = self._scalar_np_array()
        t = aidge_core.Tensor(np_array)
        val = t[0]
        self.assertEqual(val, np_array[()])

    def test_coord_indexing_scalar(self):
        np_array = self._scalar_np_array()
        t = aidge_core.Tensor(np_array)
        val = t[()]
        self.assertEqual(val, np_array[()])

if __name__ == '__main__':
    unittest.main()
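# Illustrative sketch only, not part of the test suite (assumes the default "cpu"
# backend is available): the bindings exercised above map numpy dtypes one-to-one
# onto aidge dtypes and give 0-rank tensors numpy-like indexing, e.g.:
#
#   t = aidge_core.Tensor(np.array(1, dtype="uint16"))
#   assert t.dtype() == aidge_core.dtype.uint16
#   assert t[0] == t[()] == 1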
@@ -23,17 +23,267 @@
namespace py = pybind11;
namespace Aidge {
using registrableTensor = Registrable<Tensor,
std::tuple<std::string, DataType>,
std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>;
using pyTensorClass = py::class_<Tensor,
std::shared_ptr<Tensor>,
Data,
registrableTensor>;
using pyTensorRegistrableClass = py::class_<registrableTensor,
std::shared_ptr<registrableTensor>>;
using NumpyDType = py::detail::npy_api::constants;
// Map Numpy dtype ids to aidge DataTypes.
// If a numpy dtype is not present, numpy arrays of this type are rejected.
static const std::map<NumpyDType, DataType> NumpyTypeNameAsNativeType = {
{ NumpyDType::NPY_INT8_, NativeType<std::int8_t>::type },
{ NumpyDType::NPY_INT16_, NativeType<std::int16_t>::type },
{ NumpyDType::NPY_INT32_, NativeType<std::int32_t>::type },
{ NumpyDType::NPY_INT64_, NativeType<std::int64_t>::type },
{ NumpyDType::NPY_UINT8_, NativeType<std::uint8_t>::type },
{ NumpyDType::NPY_UINT16_, NativeType<std::uint16_t>::type },
{ NumpyDType::NPY_UINT32_, NativeType<std::uint32_t>::type },
{ NumpyDType::NPY_UINT64_, NativeType<std::uint64_t>::type },
{ NumpyDType::NPY_FLOAT_, NativeType<float>::type },
{ NumpyDType::NPY_DOUBLE_, NativeType<double>::type },
};
// The Numpy API slot indexes that we need in order to convert bare numpy scalars.
// They are not provided by the pybind11 API, hence we have to redo
// the API mapping for them.
// See for instance the pull request proposing support
// for numpy scalars: https://github.com/pybind/pybind11/pull/3544/
// If it is merged upstream, we will be able to remove this code.
enum NUMPY_API_Slots {
PyArray_GetNDArrayCFeatureVersion = 211,
PyArray_TypeObjectFromType = 46,
PyArray_ScalarAsCtype = 62,
};
// Get the Numpy API pointer. We can't reuse pybind11's implementation
// as it is private; we use the same scheme and return the pointer to the
// Numpy API array.
static void **NumpyAPIPtr() {
static void **api_ptr = []() {
py::module_ m = py::module_::import("numpy.core.multiarray");
auto c = m.attr("_ARRAY_API");
void **api_ptr = (void **) PyCapsule_GetPointer(c.ptr(), nullptr);
if (api_ptr == nullptr) {
AIDGE_THROW_OR_ABORT(py::import_error, "numpy binding: unable to get numpy _ARRAY_API pointer.");
}
using ftype = unsigned int (*)();
auto version = ftype(api_ptr[NUMPY_API_Slots::PyArray_GetNDArrayCFeatureVersion])();
if (version < 0x7) {
AIDGE_THROW_OR_ABORT(py::import_error, "numpy binding: requires numpy >= 1.7.0");
}
return api_ptr;
}();
return api_ptr;
}
// Wrapper for the Numpy API PyArray_ScalarAsCtype
static void NumpyScalarAsCtype(const py::object val, void *dst_ptr) {
using ftype = void (*)(PyObject *, void *);
void **api_ptr = NumpyAPIPtr();
((ftype)api_ptr[NUMPY_API_Slots::PyArray_ScalarAsCtype])(val.ptr(), dst_ptr);
}
// Wrapper for the Numpy API PyArray_TypeObjectFromType
static PyObject *NumpyTypeObjectFromType(const NumpyDType npy_dtype) {
using ftype = PyObject *(*)(int);
void **api_ptr = NumpyAPIPtr();
auto obj = ((ftype)api_ptr[NUMPY_API_Slots::PyArray_TypeObjectFromType])(npy_dtype);
return obj;
}
// Detects and converts (without casting) a numpy scalar of npy_dtype, or returns false.
// If it matches, fills the value and the aidge dtype through the provided pointers.
static bool NPScalarGetValue(const py::object val_obj, const NumpyDType npy_dtype, void* dst_ptr, DataType* aidge_dtype_ptr) {
auto search_datatype = NumpyTypeNameAsNativeType.find(npy_dtype);
if (search_datatype == NumpyTypeNameAsNativeType.end()) {
return false;
}
auto pyobj_dtype = NumpyTypeObjectFromType(npy_dtype);
if (!isinstance(val_obj, pyobj_dtype)) {
return false;
}
*aidge_dtype_ptr = search_datatype->second;
NumpyScalarAsCtype(val_obj, dst_ptr);
return true;
}
using NativeValue = union {
std::int8_t i8; std::int16_t i16; std::int32_t i32; std::int64_t i64;
std::uint8_t u8; std::uint16_t u16; std::uint32_t u32; std::uint64_t u64;
float f32; double f64;
};
static bool getNPScalarNativeVal(const py::object obj, NativeValue* val_ptr, DataType* aidge_dtype_ptr) {
NativeValue native_val;
DataType native_dtype;
bool found = (NPScalarGetValue(obj, NumpyDType::NPY_INT32_, &native_val.i32, &native_dtype) ||
NPScalarGetValue(obj, NumpyDType::NPY_FLOAT_, &native_val.f32, &native_dtype) ||
NPScalarGetValue(obj, NumpyDType::NPY_INT8_, &native_val.i8, &native_dtype) ||
NPScalarGetValue(obj, NumpyDType::NPY_INT16_, &native_val.i16, &native_dtype) ||
NPScalarGetValue(obj, NumpyDType::NPY_INT64_, &native_val.i64, &native_dtype) ||
NPScalarGetValue(obj, NumpyDType::NPY_UINT8_, &native_val.u8, &native_dtype) ||
NPScalarGetValue(obj, NumpyDType::NPY_UINT16_, &native_val.u16, &native_dtype) ||
NPScalarGetValue(obj, NumpyDType::NPY_UINT32_, &native_val.u32, &native_dtype) ||
NPScalarGetValue(obj, NumpyDType::NPY_UINT64_, &native_val.u64, &native_dtype) ||
NPScalarGetValue(obj, NumpyDType::NPY_DOUBLE_, &native_val.f64, &native_dtype));
if (found) {
*val_ptr = native_val;
*aidge_dtype_ptr = native_dtype;
}
return found;
}
static bool getScalarNativeVal(const py::object obj, NativeValue* val_ptr, DataType* aidge_dtype_ptr) {
NativeValue native_val;
DataType native_dtype;
bool found;
// Try to match actual numpy scalars first in order to avoid unexpected casts
// when matching native python types, as numpy does some automatic conversions
// behind the scenes.
found = getNPScalarNativeVal(obj, &native_val, &native_dtype);
if (!found) {
// Then try to match int and float python scalar objects
if (py::isinstance<py::int_>(obj)) {
// Note that we use the following strategy for casting a native python int:
// in order, int32, int64 or float32, whichever is the first that does not overflow.
using caster_i32 = py::detail::type_caster<std::int32_t>;
using caster_i64 = py::detail::type_caster<std::int64_t>;
using caster_f32 = py::detail::type_caster<float>;
if (caster_i32().load(obj, false)) {
native_dtype = NativeType<std::int32_t>::type;
native_val.i32 = py::cast<std::int32_t>(obj);
} else if (caster_i64().load(obj, false)) {
native_dtype = NativeType<std::int64_t>::type;
native_val.i64 = py::cast<std::int64_t>(obj);
} else {
native_dtype = NativeType<float>::type;
native_val.f32 = py::cast<float>(obj);
}
found = true;
} else if (py::isinstance<py::float_>(obj)) {
// Note that for a native python float, we cast to float32, which may lose
// precision as python floats are of type float64.
native_dtype = NativeType<float>::type;
native_val.f32 = py::cast<float>(obj);
found = true;
}
}
if (found) {
*val_ptr = native_val;
*aidge_dtype_ptr = native_dtype;
}
return found;
}
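// For illustration, with the strategy above a python int such as 1 is stored as
// int32, 2**40 overflows int32 and is stored as int64, and a value beyond int64
// (e.g. 2**80) falls back to float32 (with a possible loss of precision).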
static void getConservativeNativeVal(const py::object obj, NativeValue *val_ptr, DataType * aidge_dtype_ptr) {
NativeValue native_val;
DataType native_dtype;
bool found;
found = getNPScalarNativeVal(obj, &native_val, &native_dtype);
if (!found) {
if (py::isinstance<py::int_>(obj)) {
// Note that for the conservative cast we use our largest int types in order
// and otherwise fall back to double, i.e.: int64, then uint64, then double
using caster_i64 = py::detail::type_caster<std::int64_t>;
using caster_u64 = py::detail::type_caster<std::uint64_t>;
if (caster_i64().load(obj, false)) {
native_dtype = NativeType<std::int64_t>::type;
native_val.i64 = py::cast<std::int64_t>(obj);
} else if (caster_u64().load(obj, false)) {
native_dtype = NativeType<std::uint64_t>::type;
native_val.u64 = py::cast<std::uint64_t>(obj);
} else {
native_dtype = NativeType<double>::type;
native_val.f64 = py::cast<double>(obj);
}
found = true;
} else if (py::isinstance<py::float_>(obj)) {
// Note that for the conservative cast we use double, which is our largest float type
native_dtype = NativeType<double>::type;
native_val.f64 = py::cast<double>(obj);
found = true;
}
}
if (!found) {
AIDGE_THROW_OR_ABORT(py::value_error, "Unsupported python type passed as scalar");
}
*val_ptr = native_val;
*aidge_dtype_ptr = native_dtype;
}
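// For illustration, with the conservative strategy above a python int such as 1
// is stored as int64, 2**64 - 1 only fits uint64, anything larger is stored as
// double, and a python float is always stored as double.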
template<typename T>
static T castToNativeType(const py::object val_obj) {
NativeValue val;
DataType dtype;
getConservativeNativeVal(val_obj, &val, &dtype);
switch (dtype) {
case DataType::Int8:
return (T)val.i8;
case DataType::Int16:
return (T)val.i16;
case DataType::Int32:
return (T)val.i32;
case DataType::Int64:
return (T)val.i64;
case DataType::UInt8:
return (T)val.u8;
case DataType::UInt16:
return (T)val.u16;
case DataType::UInt32:
return (T)val.u32;
case DataType::UInt64:
return (T)val.u64;
case DataType::Float32:
return (T)val.f32;
case DataType::Float64:
return (T)val.f64;
}
AIDGE_THROW_OR_ABORT(py::cast_error, "Unexpectedly missing conversion to scalar type");
}
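// castToNativeType<T> is used by the __setitem__ overloads below to coerce a
// python or numpy scalar value to the element type T of the target tensor.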
static void addScalarCtor(pyTensorClass& mTensor) {
// Constructor based on a bare py::object in order to match either
// python scalars (int, float) or numpy scalars (np.int32, np.int64, ...).
// There is a pull request to support numpy scalars in pybind11, through py::numpy_scalar<T>,
// though it is not merged yet: https://github.com/pybind/pybind11/pull/3544/.
// Hence we use the helper functions defined above to try matching the different numpy scalar types.
mTensor.def(py::init([](py::object obj,
const std::string backend="cpu") {
NativeValue native_val;
DataType native_dtype;
bool found = getScalarNativeVal(obj, &native_val, &native_dtype);
if (!found) {
AIDGE_THROW_OR_ABORT(py::value_error, "Unsupported python type passed to Tensor constructor");
}
Tensor* newTensor = new Tensor();
newTensor->setDataType(native_dtype);
const std::vector<DimSize_t> input_dims(0);
newTensor->resize(input_dims);
std::set<std::string> availableBackends = Tensor::getAvailableBackends();
if (availableBackends.find(backend) != availableBackends.end()){
newTensor->setBackend(backend);
newTensor->getImpl()->copyFromHost(static_cast<void *>(&native_val), newTensor->size());
}else{
AIDGE_THROW_OR_ABORT(py::value_error, "Could not find backend {}, verify you have `import aidge_backend_{}`.\n", backend, backend);
}
return newTensor;
}), py::arg("val"), py::arg("backend")="cpu", py::kw_only());
}
template<typename T>
void addArrayCtor(pyTensorClass& mTensor) {
mTensor.def(py::init([](const py::array_t<T, py::array::c_style|py::array::forcecast> b,
const std::string backend = "cpu") {
/* Request a buffer descriptor from Python */
py::buffer_info info = b.request();
Tensor* newTensor = new Tensor();
@@ -44,37 +294,23 @@ void addCtor(py::class_<Tensor,
std::set<std::string> availableBackends = Tensor::getAvailableBackends();
if (availableBackends.find(backend) != availableBackends.end()){
newTensor->setBackend(backend);
newTensor->getImpl()->copyFromHost(static_cast<const T*>(info.ptr), newTensor->size());
}else{
AIDGE_THROW_OR_ABORT(py::value_error, "Could not find backend {}, verify you have `import aidge_backend_{}`.\n", backend, backend);
}
return newTensor;
}), py::arg("array"), py::arg("backend")="cpu")
.def(py::init<T>(), py::arg("val"))
.def("__setitem__", (void (Tensor::*)(std::size_t, T)) &Tensor::set)
.def("__setitem__", (void (Tensor::*)(std::vector<std::size_t>, T)) &Tensor::set)
;
}), py::arg("array"), py::arg("backend")="cpu", py::kw_only());
}
void init_Tensor(py::module& m){
pyTensorRegistrableClass(m,"TensorRegistrable");
pyTensorClass pyClassTensor
(m,"Tensor", py::multiple_inheritance(), py::buffer_protocol());
pyClassTensor
.def(py::self + py::self)
.def(py::self - py::self)
.def(py::self * py::self)
@@ -107,7 +343,7 @@ void init_Tensor(py::module& m){
.def("__len__", [](Tensor& b) -> size_t{
return b.size();
})
.def("__getitem__", [](Tensor& b, size_t idx)-> py::object {
.def("__getitem__", [](const Tensor& b, const size_t idx)-> py::object {
if (idx >= b.size()) throw py::index_error();
switch(b.dataType()){
case DataType::Float64:
@@ -126,11 +362,15 @@ void init_Tensor(py::module& m){
return py::cast(b.get<std::uint8_t>(idx));
case DataType::UInt16:
return py::cast(b.get<std::uint16_t>(idx));
case DataType::UInt32:
return py::cast(b.get<std::uint32_t>(idx));
case DataType::UInt64:
return py::cast(b.get<std::uint64_t>(idx));
default:
return py::none();
}
})
.def("__getitem__", [](Tensor& b, std::vector<size_t> coordIdx)-> py::object {
.def("__getitem__", [](const Tensor& b, const std::vector<size_t>& coordIdx)-> py::object {
if (b.getIdx(coordIdx) >= b.size()) throw py::index_error();
switch(b.dataType()){
case DataType::Float64:
@@ -149,10 +389,90 @@ void init_Tensor(py::module& m){
return py::cast(b.get<std::uint8_t>(coordIdx));
case DataType::UInt16:
return py::cast(b.get<std::uint16_t>(coordIdx));
case DataType::UInt32:
return py::cast(b.get<std::uint32_t>(coordIdx));
case DataType::UInt64:
return py::cast(b.get<std::uint64_t>(coordIdx));
default:
return py::none();
}
})
.def("__setitem__", [](Tensor& b, const std::size_t idx, const py::object val) {
if (idx >= b.size()) throw py::index_error();
switch(b.dataType()){
case DataType::Float64:
b.set(idx, castToNativeType<double>(val));
break;
case DataType::Float32:
b.set(idx, castToNativeType<float>(val));
break;
case DataType::Int8:
b.set(idx, castToNativeType<std::int8_t>(val));
break;
case DataType::Int16:
b.set(idx, castToNativeType<std::int16_t>(val));
break;
case DataType::Int32:
b.set(idx, castToNativeType<std::int32_t>(val));
break;
case DataType::Int64:
b.set(idx, castToNativeType<std::int64_t>(val));
break;
case DataType::UInt8:
b.set(idx, castToNativeType<std::uint8_t>(val));
break;
case DataType::UInt16:
b.set(idx, castToNativeType<std::uint16_t>(val));
break;
case DataType::UInt32:
b.set(idx, castToNativeType<std::uint32_t>(val));
break;
case DataType::UInt64:
b.set(idx, castToNativeType<std::uint64_t>(val));
break;
default:
break;
}
})
.def("__setitem__", [](Tensor& b, const std::vector<size_t>& coordIdx, const py::object val) {
if (b.getIdx(coordIdx) >= b.size()) throw py::index_error();
switch(b.dataType()){
case DataType::Float64:
b.set(coordIdx, castToNativeType<double>(val));
break;
case DataType::Float32:
b.set(coordIdx, castToNativeType<float>(val));
break;
case DataType::Int8:
b.set(coordIdx, castToNativeType<std::int8_t>(val));
break;
case DataType::Int16:
b.set(coordIdx, castToNativeType<std::int16_t>(val));
break;
case DataType::Int32:
b.set(coordIdx, castToNativeType<std::int32_t>(val));
break;
case DataType::Int64:
b.set(coordIdx, castToNativeType<std::int64_t>(val));
break;
case DataType::UInt8:
b.set(coordIdx, castToNativeType<std::uint8_t>(val));
break;
case DataType::UInt16:
b.set(coordIdx, castToNativeType<std::uint16_t>(val));
break;
case DataType::UInt32:
b.set(coordIdx, castToNativeType<std::uint32_t>(val));
break;
case DataType::UInt64:
b.set(coordIdx, castToNativeType<std::uint64_t>(val));
break;
default:
break;
}
})
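// Example (illustrative): for a float32 tensor t, both t[0] = 2 and
// t[(1, 2)] = np.float64(0.5) are accepted from python; the value is first
// decoded by getConservativeNativeVal and then cast to float through
// castToNativeType<float>.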
.def_buffer([](Tensor& b) -> py::buffer_info {
const std::shared_ptr<TensorImpl>& tensorImpl = b.getImpl();
@@ -194,6 +514,12 @@ void init_Tensor(py::module& m){
case DataType::UInt16:
dataFormatDescriptor = py::format_descriptor<std::uint16_t>::format();
break;
case DataType::UInt32:
dataFormatDescriptor = py::format_descriptor<std::uint32_t>::format();
break;
case DataType::UInt64:
dataFormatDescriptor = py::format_descriptor<std::uint64_t>::format();
break;
default:
throw py::value_error("Unsupported data format");
}
@@ -208,14 +534,55 @@ void init_Tensor(py::module& m){
);
});
//
// Constructor overloads follow.
// The implemented python constructor interface is:
// __init__(self, val: float|int|np.ndarray = None, backend: str = "cpu", *, dims: list|tuple = None):
//
// Where:
// - if no arg is specified we get an undefined Tensor (no dims, no val);
// - if only dims is specified, it will create an uninitialized tensor of the given dims and dtype float32;
// - otherwise if val is specified, dims is ignored and if val is a:
//   - scalar: it will create a 0-rank scalar tensor of dtype:
//     - if val is float: float32
//     - if val is int: in this order, int32, int64 or float32 (the first that does not overflow)
//   - np.ndarray of a given np.dtype: it will create an equivalent tensor of dtype == np.dtype when supported
//   - np.dtype scalar: it will create an equivalent scalar tensor of dtype == np.dtype when supported
//
// In order to implement this, we provide several overloads which are carefully ordered to fulfill
// the above requirements.
//
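// For illustration only (assuming a "cpu" backend is available, e.g. through
// `import aidge_backend_cpu`), the interface above maps to python usage such as:
//   aidge_core.Tensor()                                # undefined tensor
//   aidge_core.Tensor(dims=[2, 3])                     # uninitialized float32 tensor of dims [2, 3]
//   aidge_core.Tensor(1)                               # int32 scalar tensor
//   aidge_core.Tensor(1.0)                             # float32 scalar tensor
//   aidge_core.Tensor(np.uint8(4))                     # uint8 scalar tensor
//   aidge_core.Tensor(np.ones((2, 3), dtype="int64"))  # int64 tensor of dims [2, 3]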
// Undefined Tensor
pyClassTensor.def(py::init<>());
// Uninitialized tensor of given dims and dtype float32
// Note that we force dims to be a keyword only argument
pyClassTensor.def(py::init<const std::vector<std::size_t>&>(), py::kw_only(), py::arg("dims"));
// N-D array tensors (including 0-D from numpy 0-rank arrays)
// Note that in this case we have to define all supported Tensor dtypes,
// otherwise the dtypes will be promoted by pybind unexpectedly.
// Note that these overloads must appear before the scalar overloads below,
// otherwise pybind will try to demote 0-D arrays to scalars without preserving the
// np array dtype.
// TODO: Note that the list of supported numpy dtypes is possibly incomplete here
// TODO: need to add some conversion functions to target dtypes not supported by numpy
//       such as int4, int7, bfloat, ...
addArrayCtor<std::int8_t>(pyClassTensor);
addArrayCtor<std::int16_t>(pyClassTensor);
addArrayCtor<std::int32_t>(pyClassTensor);
addArrayCtor<std::int64_t>(pyClassTensor);
addArrayCtor<std::uint8_t>(pyClassTensor);
addArrayCtor<std::uint16_t>(pyClassTensor);
addArrayCtor<std::uint32_t>(pyClassTensor);
addArrayCtor<std::uint64_t>(pyClassTensor);
addArrayCtor<float>(pyClassTensor);
addArrayCtor<double>(pyClassTensor);
// Scalar argument
// Handles python scalars and numpy scalars with a single overload
addScalarCtor(pyClassTensor);
}
}