diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py index 825ca6100382116443699a00bcff27b9bbca028a..fb7ed0587fb074858e9f3766d5de0d43b39d1ef5 100644 --- a/aidge_core/unit_tests/test_operator_binding.py +++ b/aidge_core/unit_tests/test_operator_binding.py @@ -125,6 +125,23 @@ class test_operator_binding(unittest.TestCase): generic_op.forward() # Increment idx self.assertEqual(customImpl.idx, 1) + def test_magic_meth(self): + myVar = 2 + myBool = True + # Test dynamic attribute set + gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", myVar=myVar).get_operator() + gop.myBool = myBool + # Test variable set by kwargs + self.assertEqual(gop.myVar, myVar) + # Test set attr + self.assertEqual(gop.myBool, myBool) + + # Test static attribute set ! + prod = aidge_core.Producer([1]).get_operator() + self.assertEqual(prod.Constant, False) + prod.Constant = True # By default Constant is False + self.assertEqual(prod.Constant, True) + if __name__ == '__main__': diff --git a/aidge_core/unit_tests/test_tensor.py b/aidge_core/unit_tests/test_tensor.py index a214a0e354c64b515d0a7ac24d81c85e116938ca..d479c98b20534daa804f6019b63d528883c2b568 100644 --- a/aidge_core/unit_tests/test_tensor.py +++ b/aidge_core/unit_tests/test_tensor.py @@ -10,16 +10,16 @@ SPDX-License-Identifier: EPL-2.0 import unittest import aidge_core - from functools import reduce + import numpy as np + class test_tensor(unittest.TestCase): - """ + """Test tensor binding """ def setUp(self): pass - def tearDown(self): pass @@ -35,10 +35,60 @@ class test_tensor(unittest.TestCase): idx = t.get_idx(coord) self.assertEqual(idx, i) -if __name__ == '__main__': - unittest.main() + def test_getavailable_backends(self): + self.assertTrue("cpu" in aidge_core.Tensor.get_available_backends()) + + def test_numpy_int_to_tensor(self): + np_array = np.arange(9).reshape(1,1,3,3).astype(np.int32) + # Numpy -> Tensor + t = aidge_core.Tensor(np_array) + self.assertEqual(t.dtype(), aidge_core.DataType.Int32) + for i_t, i_n in zip(t, np_array.flatten()): + self.assertTrue(i_t == i_n) + for i,j in zip(t.dims(), np_array.shape): + self.assertEqual(i,j) + def test_tensor_int_to_numpy(self): + np_array = np.arange(9).reshape(1,1,3,3) + # Numpy -> Tensor + t = aidge_core.Tensor(np_array) + # Tensor -> Numpy + nnarray = np.array(t) + for i_nn, i_n in zip(nnarray.flatten(), np_array.flatten()): + self.assertTrue(i_nn == i_n) + for i,j in zip(t.dims(), nnarray.shape): + self.assertEqual(i,j) + def test_numpy_int64_to_tensor(self): + np_array = np.arange(9).reshape(1,1,3,3).astype(np.int64) + # Numpy -> Tensor + t = aidge_core.Tensor(np_array) + self.assertEqual(t.dtype(), aidge_core.DataType.Int64) + for i_t, i_n in zip(t, np_array.flatten()): + self.assertTrue(i_t == i_n) + for i,j in zip(t.dims(), np_array.shape): + self.assertEqual(i,j) + def test_numpy_float_to_tensor(self): + t = aidge_core.Tensor() + np_array = np.random.rand(1, 1, 3, 3).astype(np.float32) + # Numpy -> Tensor + t = aidge_core.Tensor(np_array) + self.assertEqual(t.dtype(), aidge_core.DataType.Float32) + for i_t, i_n in zip(t, np_array.flatten()): + self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference + for i,j in zip(t.dims(), np_array.shape): + self.assertEqual(i,j) + def test_get_set(self): + dims = [2,2,2] + np_array = np.arange(8).reshape(dims).astype(np.int32) + # Numpy -> Tensor + t = aidge_core.Tensor(np_array) + for i in range(8): + self.assertEqual(t[i], i) + t[i] = 5 + self.assertEqual(t[i], 5) +if 
__name__ == '__main__': + unittest.main() diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp index cc0979b07b07c2b95515eda09fda68a9ec4ac63e..c5b027e70f2153d106fbaccef166d85dbe1efe1f 100644 --- a/include/aidge/aidge.hpp +++ b/include/aidge/aidge.hpp @@ -15,6 +15,9 @@ #include "aidge/backend/OperatorImpl.hpp" #include "aidge/backend/TensorImpl.hpp" +#include "aidge/backend/cpu/data/TensorImpl.hpp" +#include "aidge/backend/cpu/data/GetCPUPtr.h" + #include "aidge/data/Data.hpp" #include "aidge/data/Tensor.hpp" diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp index a27f0317c59916facef970a3c1b91704fb485cd4..62f13acb3db81954a4fbb753a3e68e1c5a516402 100644 --- a/include/aidge/backend/TensorImpl.hpp +++ b/include/aidge/backend/TensorImpl.hpp @@ -67,19 +67,13 @@ private: class TensorImpl { public: TensorImpl() = delete; - TensorImpl(const char *backend, DeviceIdx_t device = 0) : mBackend(backend), mDevice(device){}; + TensorImpl(const char *backend, DeviceIdx_t device, NbElts_t length) : mBackend(backend), mDevice(device), mNbElts(length) {}; /** * Return the (backend, device) pair for this implementation. */ std::pair<std::string, DeviceIdx_t> device() const { return std::make_pair(mBackend, mDevice); } - /** - * Set the device ID for current backend. - * @param device New device ID on current backend. - */ - virtual void setDevice(DeviceIdx_t device) = 0; - /** * Copy data from the same device. * @param src Pointer on current implementation device. @@ -93,30 +87,34 @@ public: * @param srcDt Source data type. * @param src Pointer on current implementation device. * @param length Number of elements to copy. + * @param offset Destination offset (in number of elements). */ - virtual void copyCast(const void *src, NbElts_t length, const DataType srcDt) = 0; + virtual void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) = 0; /** * Copy data from an other device on the same backend. * @param device (backend, device) pair to copy from. The backend must match current implementation backend. * @param src Pointer on current implementation backend. * @param length Number of elements to copy. + * @param offset Destination offset (in number of elements). */ - virtual void copyFromDevice(const void *src, NbElts_t length, const std::pair<std::string, DeviceIdx_t>& device) = 0; + virtual void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) = 0; /** * Copy data from host. * @param src Host pointer to copy from. * @param length Number of elements to copy. + * @param offset Destination offset (in number of elements). */ - virtual void copyFromHost(const void *src, NbElts_t length) = 0; + virtual void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) = 0; /** * Copy data to host. * @param src Host pointer to copy to. * @param length Number of elements to copy. + * @param offset Source offset (in number of elements). */ - virtual void copyToHost(void *dst, NbElts_t length) const = 0; + virtual void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const = 0; /** * Return the raw device pointer. @@ -146,8 +144,22 @@ public: AIDGE_THROW_OR_ABORT(std::runtime_error, "Cannot set raw pointer for backend %s", mBackend); }; - virtual std::size_t size() const = 0; // Storage size - virtual std::size_t scalarSize() const = 0; // Size of one scalar (in bytes) + /** + * Set the size, in number of elements, that must be stored. 
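The offset parameters introduced above let callers scatter several host buffers into one implementation without doing the pointer arithmetic themselves. A minimal sketch of the intended call pattern, assuming the cpu implementation registered later in this patch and purely illustrative sizes:

    // Create a 10-element float32 implementation on device 0 via the registrar,
    // then fill it from two host buffers, the second starting at element offset 4.
    std::shared_ptr<Aidge::TensorImpl> impl =
        Aidge::Registrar<Aidge::Tensor>::create({"cpu", Aidge::DataType::Float32})(0, 10);
    const float head[4] = {0.f, 1.f, 2.f, 3.f};
    const float tail[6] = {4.f, 5.f, 6.f, 7.f, 8.f, 9.f};
    impl->copyFromHost(head, 4);     // elements [0, 4)
    impl->copyFromHost(tail, 6, 4);  // elements [4, 10), using the new offset argument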
+ */ + void resize(NbElts_t length) { + mNbElts = length; + } + + /** + * Return the number of elements stored. + */ + inline std::size_t size() const noexcept { return mNbElts; } + + /** + * Return the size (in bytes) of one element (scalar). + */ + virtual std::size_t scalarSize() const noexcept = 0; constexpr const char *backend() const { return mBackend; } virtual ~TensorImpl() = default; virtual bool operator==(const TensorImpl &othImpl) const = 0; @@ -156,12 +168,16 @@ public: * Copy from another backend. * @param srcImpl Source TensorImpl to copy from. * @param length Number of elements of size scalarSize() to copy + * @param srcOffset Source offset (in number of elements). + * @param dstOffset Destination offset (in number of elements). */ - void copyFrom(const TensorImpl& srcImpl, NbElts_t length); + void copyFrom(const TensorImpl& srcImpl, NbElts_t length, NbElts_t srcOffset = 0, NbElts_t dstOffset = 0); protected: const char *mBackend; - DeviceIdx_t mDevice; + const DeviceIdx_t mDevice; + /// Number of elements (to be) stored + NbElts_t mNbElts; }; } // namespace Aidge diff --git a/include/aidge/backend/cpu/data/GetCPUPtr.h b/include/aidge/backend/cpu/data/GetCPUPtr.h new file mode 100644 index 0000000000000000000000000000000000000000..47e3b07e8fa08cdcd714745a9a49bb03e30f79f5 --- /dev/null +++ b/include/aidge/backend/cpu/data/GetCPUPtr.h @@ -0,0 +1,24 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_DATA_GETCPUPTR_H_ +#define AIDGE_CPU_DATA_GETCPUPTR_H_ + +#include "aidge/data/Tensor.hpp" + +namespace Aidge { +inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) { + const auto tensor = std::static_pointer_cast<Tensor>(data); + return tensor->getImpl()->hostPtr(tensor->getImplOffset()); +} +} // namespace Aidge + +#endif // AIDGE_CPU_DATA_GETCPUPTR_H_ \ No newline at end of file diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp new file mode 100644 index 0000000000000000000000000000000000000000..46dfae3d53b4b201507290bd538ea13737919c3e --- /dev/null +++ b/include/aidge/backend/cpu/data/TensorImpl.hpp @@ -0,0 +1,193 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. 
+ * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#ifndef AIDGE_CPU_DATA_TENSORIMPL_H_ +#define AIDGE_CPU_DATA_TENSORIMPL_H_ + +#include "aidge/backend/TensorImpl.hpp" +#include "aidge/data/Tensor.hpp" +#include "aidge/data/half.hpp" +#include "aidge/utils/Registrar.hpp" +#include "aidge/utils/Types.h" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/utils/future_std/span.hpp" + +namespace Aidge { + +template <class T> +class TensorImpl_cpu : public TensorImpl { +private: + /// Pointer to the data and its capacity + future_std::span<T> mData; + /// If this instance own the data, std::unique_ptr manages it + std::unique_ptr<T[]> mDataOwner; + +public: + static constexpr const char *Backend = "cpu"; + + TensorImpl_cpu(DeviceIdx_t device, NbElts_t length) : TensorImpl(Backend, device, length) {} + + bool operator==(const TensorImpl &otherImpl) const override final { + const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl); + AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts); + + std::size_t i = 0; + for (; i < mNbElts && + *(mData.data()+i) == *static_cast<const T*>(typedOtherImpl.rawPtr(i)); + ++i) { + } + return i == mNbElts; + } + + static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, NbElts_t length) { + return std::make_shared<TensorImpl_cpu<T>>(device, length); + } + + inline std::size_t scalarSize() const noexcept override final { return sizeof(T); } + + void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final { + const T* srcT = static_cast<const T *>(src); + T* dstT = static_cast<T *>(rawPtr(offset)); + + AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity"); + AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported"); + std::copy(srcT, srcT + length, dstT); + } + + void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final { + if (length == 0) { + return; + } + + T* dstT = static_cast<T *>(rawPtr(offset)); + AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity"); + switch (srcDt) + { + case DataType::Float64: + std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length, + dstT); + break; + case DataType::Float32: + std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length, + dstT); + break; + case DataType::Float16: + std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length, + dstT); + break; + case DataType::Int64: + std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length, + dstT); + break; + case DataType::UInt64: + std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length, + dstT); + break; + case DataType::Int32: + std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length, + dstT); + break; + case DataType::UInt32: + std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length, + dstT); + break; + case DataType::Int16: + std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length, + dstT); + break; + case DataType::UInt16: + std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length, + dstT); + break; + case DataType::Int8: + std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length, + 
dstT); + break; + case DataType::UInt8: + std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length, + dstT); + break; + default: + AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type."); + break; + } + } + + void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final { + AIDGE_ASSERT(device.first == Backend, "backend must match"); + AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend"); + copy(src, length, offset); + } + + inline void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final { + copy(src, length, offset); + } + + void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final { + const T* src = static_cast<const T*>(rawPtr(offset)); + AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity"); + std::copy(src, src + length, static_cast<T *>(dst)); + } + + void *rawPtr(NbElts_t offset = 0) override final { + lazyInit(); + return (mData.data() + offset); + }; + + const void *rawPtr(NbElts_t offset = 0) const override final { + AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const rawPtr"); + return (mData.data() + offset); + }; + + void *hostPtr(NbElts_t offset = 0) override final { + lazyInit(); + return (mData.data() + offset); + }; + + const void *hostPtr(NbElts_t offset = 0) const override final { + AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const hostPtr"); + return (mData.data() + offset); + }; + + void setRawPtr(void *ptr, NbElts_t length) override final { + AIDGE_ASSERT(length >= mNbElts, "trying to set raw pointer of insufficient capacity"); + mData = future_std::span<T>(static_cast<T *>(ptr), length); + mDataOwner.reset(); + }; + + virtual ~TensorImpl_cpu() = default; + +private: + void lazyInit() { + if (mData.size() < mNbElts) { + // Need more data, a re-allocation will occur + AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data"); + mDataOwner.reset(new T[mNbElts]); + mData = future_std::span<T>(mDataOwner.get(), mNbElts); + } + } +}; + +namespace { +static Registrar<Tensor> registrarTensorImpl_cpu_Float64( + {"cpu", DataType::Float64}, Aidge::TensorImpl_cpu<double>::create); +static Registrar<Tensor> registrarTensorImpl_cpu_Float32( + {"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create); +static Registrar<Tensor> registrarTensorImpl_cpu_Float16( + {"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create); +static Registrar<Tensor> registrarTensorImpl_cpu_Int32( + {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create); +static Registrar<Tensor> registrarTensorImpl_cpu_Int64( + {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<long>::create); +} // namespace +} // namespace Aidge + +#endif /* AIDGE_CPU_DATA_TENSORIMPL_H_ */ diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp index 8129a900718169861dc2df4213cd3533d1dfe570..658c0b497d9753f1bdfd42a274dbb48970cb6d6b 100644 --- a/include/aidge/data/Tensor.hpp +++ b/include/aidge/data/Tensor.hpp @@ -32,15 +32,18 @@ namespace Aidge { * Contains a pointer to an actual contiguous implementation of data. 
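Because the storage is a span backed by an optional owner, a cpu implementation can either allocate lazily on first access or wrap a caller-owned buffer. A hypothetical sketch of the second case, using only the members declared above:

    // Wrap an existing 8-element buffer instead of letting lazyInit() allocate one.
    float external[8] = {};
    Aidge::TensorImpl_cpu<float> impl(0, 8);       // device 0, 8 elements expected
    impl.setRawPtr(external, 8);                   // mData now refers to 'external', mDataOwner is released
    static_cast<float*>(impl.rawPtr())[0] = 1.0f;  // capacity >= mNbElts, so no reallocation happens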
*/ class Tensor : public Data, - public Registrable<Tensor, std::tuple<std::string, DataType>, std::unique_ptr<TensorImpl>(const Tensor &)> { + public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)> { private: DataType mDataType; /** enum to specify data type. */ std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */ - std::unique_ptr<TensorImpl> mImpl; /** Pointer to the actual data implementation. */ + std::vector<DimSize_t> mStrides; /** Stride dimensions of the tensor. */ + std::shared_ptr<TensorImpl> mImpl; /** Pointer to the actual data implementation. */ + std::size_t mImplOffset = 0; std::shared_ptr<Tensor> mGrad; /** Pointer to the associated gradient Tensor instance. */ // Cached data std::size_t mSize = 0; /** Number of elements in the Tensor. */ + bool mContiguous = true; public: static constexpr const char *Type = "Tensor"; @@ -57,21 +60,29 @@ class Tensor : public Data, } /** - * @brief Construct a new Tensor object copied from another one. + * @brief Construct a new Tensor object from another one (shallow copy). + * Data memory is not copied, but shared between the new Tensor and the + * initial one. + * * @param otherTensor */ - Tensor(const Tensor& otherTensor) - : Data(Type), - mDataType(otherTensor.mDataType), - mDims(otherTensor.mDims), - mSize(otherTensor.mSize) - { - if (otherTensor.hasImpl()) { - mImpl = Registrar<Tensor>::create({otherTensor.mImpl->backend(), dataType()})(*this); - mImpl->setDevice(otherTensor.mImpl->device().second); - // Same backend, same device => directly use copy() - mImpl->copy(otherTensor.mImpl->rawPtr(), mSize); + Tensor(const Tensor&) = default; + Tensor(Tensor&&) = default; + + /** + * Perform a deep copy of the tensor. 
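Since the copy constructor is now defaulted, copying a Tensor only shares its implementation; clone() below is the way to obtain an independent deep copy. A short usage sketch, assuming the cpu backend from this patch is registered:

    Aidge::Tensor a = Aidge::Array1D<float, 2>{{1.0f, 2.0f}};
    Aidge::Tensor b(a);           // shallow copy: b shares a's implementation and storage
    Aidge::Tensor c = a.clone();  // deep copy: c owns a separate implementation
    b.set<float>(0, 5.0f);        // also visible through a, since storage is shared
    // a.get<float>(0) == 5.0f, while c.get<float>(0) == 1.0f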
+ */ + Tensor clone() const { + Tensor newTensor(*this); + if (!newTensor.isContiguous()) { + newTensor.makeContiguous(); + } + else { + std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mSize); + newImpl->copy(mImpl->rawPtr(mImplOffset), mSize); + newTensor.setImpl(newImpl); } + return newTensor; } /** @@ -84,7 +95,8 @@ class Tensor : public Data, : Data(Type), mDataType(NativeType<T>::type), mDims({SIZE_0}), - mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)), + mStrides({1}), + mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0)), mSize(SIZE_0) { mImpl->copyFromHost(&arr.data[0], SIZE_0); } @@ -93,9 +105,9 @@ class Tensor : public Data, constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) { resize({SIZE_0}); if (!mImpl) { - mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this); + mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0); } - mImpl->copyFromHost(&arr.data[0], SIZE_0); + mImpl->copyFromHost(&arr.data[0], SIZE_0, mImplOffset); return *this; } @@ -110,7 +122,8 @@ class Tensor : public Data, : Data(Type), mDataType(NativeType<T>::type), mDims({SIZE_0, SIZE_1}), - mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)), + mStrides({SIZE_1, 1}), + mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1)), mSize(SIZE_0 * SIZE_1) { mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1); } @@ -119,9 +132,9 @@ class Tensor : public Data, constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) { resize({SIZE_0, SIZE_1}); if (!mImpl) { - mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this); + mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1); } - mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1); + mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1, mImplOffset); return *this; } @@ -137,7 +150,8 @@ class Tensor : public Data, : Data(Type), mDataType(NativeType<T>::type), mDims({SIZE_0, SIZE_1, SIZE_2}), - mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)), + mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}), + mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2)), mSize(SIZE_0 * SIZE_1 * SIZE_2) { mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2); } @@ -146,9 +160,9 @@ class Tensor : public Data, constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) { resize({SIZE_0, SIZE_1, SIZE_2}); if (!mImpl) { - mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this); + mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2); } - mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2); + mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2, mImplOffset); return *this; } @@ -165,7 +179,8 @@ class Tensor : public Data, : Data(Type), mDataType(NativeType<T>::type), mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}), - mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this)), + mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}), + mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3)), mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3) { mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3); } @@ -174,33 +189,35 @@ class Tensor : public Data, constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) { 
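        // Note: this and the other ArrayND assignment operators follow the same pattern:
        // resize() updates dims/strides and the implementation's element count,
        // a "cpu" implementation is created through the registrar if none exists yet,
        // and copyFromHost() writes the flattened initializer at this tensor's
        // storage offset (mImplOffset).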
resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3}); if (!mImpl) { - mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(*this); + mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3); } - mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3); + mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3, mImplOffset); return *this; } /** - * @brief Copy dimensions, datatype and data of another Tensor. + * @brief Copy dimensions, datatype and data from another Tensor. + * If current Tensor already has an implementation, data is copied to the + * existing implementation. Tensor backend/device remain untouched. + * If current Tensor does not have an implementation, only a shallow copy + * is performed and the Tensor will share data with t. * @param t other Tensor object. * @return Tensor& */ Tensor &operator=(const Tensor &t) { - resize(t.dims()); - setDataType(t.dataType()); + resize(t.dims(), t.strides()); + setDataType(t.dataType(), false); // do not convert existing data if (t.hasImpl()) { if (hasImpl()) { - copyCastFrom(t); + copyFrom(t); } else { - mImpl = Registrar<Tensor>::create({t.mImpl->backend(), dataType()})(*this); - mImpl->setDevice(t.mImpl->device().second); - // Same backend, same device => directly use copy() - mImpl->copy(t.mImpl->rawPtr(), mSize); + // Perform a shallow copy only + setImpl(t.mImpl, t.mImplOffset); } } else { - mImpl = nullptr; + setImpl(nullptr); } return *this; } @@ -233,17 +250,15 @@ class Tensor : public Data, if (mImpl->device() != std::make_pair(name, device)) { // Backend change: create new impl, copy from old to new and replace // impl - std::unique_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(*this); - newImpl->setDevice(device); + std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mImpl->size()); if (copyFrom) { - newImpl->copyFrom(*mImpl, size()); + newImpl->copyFrom(*mImpl, mImpl->size(), mImplOffset, 0); } - mImpl = std::move(newImpl); + setImpl(newImpl); } } else { - mImpl = Registrar<Tensor>::create({name, mDataType})(*this); - mImpl->setDevice(device); + mImpl = Registrar<Tensor>::create({name, mDataType})(device, mSize); } } @@ -273,21 +288,32 @@ class Tensor : public Data, */ void setDataType(const DataType dt, bool copyCast = true) { if (mImpl && (dataType() != dt)) { - std::unique_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(*this); + std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(mImpl->device().second, mImpl->size()); if (copyCast) { - newImpl->copyCast(mImpl->rawPtr(), size(), mDataType); + newImpl->copyCast(mImpl->rawPtr(mImplOffset), mDataType, mImpl->size()); } - mImpl = std::move(newImpl); + setImpl(newImpl); } mDataType = dt; } /** * @brief Get the Impl object - * @return constexpr const std::unique_ptr<TensorImpl>& + * @return constexpr const std::shared_ptr<TensorImpl>& */ - constexpr const std::unique_ptr<TensorImpl> &getImpl() { return mImpl; } - constexpr const std::unique_ptr<TensorImpl> &getImpl() const { return mImpl; } + constexpr const std::shared_ptr<TensorImpl> &getImpl() const { return mImpl; } + constexpr std::size_t getImplOffset() const { return mImplOffset; } + + /** + * @brief Set the Impl object + * + * @param impl New impl shared pointer + * @param implOffset Storage offset in this new impl for this Tensor + */ + void setImpl(std::shared_ptr<TensorImpl> impl, std::size_t implOffset = 
0) { + mImpl = impl; + mImplOffset = implOffset; + } /** * @brief Return if an implementaiton has been associated. @@ -319,6 +345,18 @@ class Tensor : public Data, */ constexpr const std::vector<DimSize_t> &dims() const { return mDims; } + /** + * @brief Get strides of the Tensor object. + * @return constexpr const std::vector<DimSize_t>& + */ + constexpr const std::vector<DimSize_t> &strides() const { return mStrides; } + + /** + * @brief Return true if Tensor is contiguous in memory. + * @return bool + */ + constexpr bool isContiguous() const { return mContiguous; } + /** * @brief Get the number of elements in the Tensor object. * @return constexpr std::size_t @@ -350,10 +388,49 @@ class Tensor : public Data, * one, all previous data is invalided. Otherwise, previous data may or may * not remain valid, depending on the backend implementation. * @param dims New dimensions + * @param strides Stride of the tensor (if not specified, "nested" stride is used) */ - void resize(const std::vector<DimSize_t> &dims) { - mDims = dims; - computeSize(); + void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>()) { + bool checkContiguous = true; + if (strides.empty()) { + strides.resize(dims.size()); + size_t expectedStride = 1; + for (int dim = dims.size() - 1; dim >= 0; --dim) { + strides[dim] = expectedStride; + expectedStride*= dims[dim]; + } + checkContiguous = false; + } + else { + AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims"); + } + + if (mImpl.use_count() > 1) { + // Here we could also create a new storage for this tensor in this case + // But, is it more likely that the user really wants this, or that he did a mistake? + AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage"); + } + else { + mDims = dims; + mStrides = strides; + + mContiguous = true; + if (checkContiguous) { + size_t expectedStride = 1; + for (int dim = dims.size() - 1; dim >= 0; --dim) { + if (strides[dim] != expectedStride) { + mContiguous = false; + break; + } + expectedStride*= dims[dim]; + } + } + + computeSize(); + if (mImpl) { + mImpl->resize(mSize); + } + } } /** @@ -367,25 +444,25 @@ class Tensor : public Data, const expectedType& get(std::size_t idx) const { AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type"); AIDGE_ASSERT(idx < mSize, "idx out of range"); - return *reinterpret_cast<expectedType *>(mImpl->hostPtr(idx)); + return *reinterpret_cast<expectedType *>(mImpl->hostPtr(mImplOffset + idx)); } template <typename expectedType> const expectedType& get(std::vector<std::size_t> coordIdx) const { - return get<expectedType>(getIdx(coordIdx)); + return get<expectedType>(getStorageIdx(coordIdx)); } template <typename expectedType> void set(std::size_t idx, expectedType value){ AIDGE_ASSERT(NativeType<expectedType>::type == mDataType, "wrong data type"); AIDGE_ASSERT(idx < mSize, "idx out of range"); - expectedType* dataPtr = static_cast<expectedType*>(mImpl->hostPtr(idx)); + expectedType* dataPtr = static_cast<expectedType*>(mImpl->hostPtr(mImplOffset + idx)); *dataPtr = value; } template <typename expectedType> void set(std::vector<std::size_t> coordIdx, expectedType value){ - set<expectedType>(getIdx(coordIdx), value); + set<expectedType>(getStorageIdx(coordIdx), value); } @@ -449,9 +526,9 @@ class Tensor : public Data, for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) { res += spaceString + "{"; for (DimSize_t j = 0; j < dims()[dim + 
1] - 1; ++j) { - res += " " + ptrToString(mDataType, mImpl->hostPtr(), counter++) + ","; + res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + ","; } - res += " " + ptrToString(mDataType, mImpl->hostPtr(), counter++) + "}"; + res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + "}"; if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) { res += ","; } @@ -471,7 +548,7 @@ } else { res += "{"; for (DimSize_t j = 0; j < dims()[0]; ++j) { - res += " " + ptrToString(mDataType, mImpl->hostPtr(), j) + ((j < dims()[0]-1) ? "," : " "); + res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) + ((j < dims()[0]-1) ? "," : " "); } } res += "}"; @@ -493,6 +570,7 @@ /** * @brief From the the 1D contiguous index, return the coordinate of an element in the tensor. + * Beware: do not use this function with the storage index! * * @param flatIdx 1D contiguous index of the value considering a flatten, contiguous, tensor. * @return std::vector<DimSize_t> @@ -512,6 +590,8 @@ * @brief From the coordinate returns the 1D contiguous index of an element in the tensor. * If the number of coordinates is inferior to the number of dimensions, * the remaining coordinates are assumed to be 0. + * Beware: the contiguous index will only correspond to the storage index + * if the tensor is contiguous! * * @param coordIdx Coordinate to an element in the tensor * @return DimSize_t Contiguous index @@ -527,6 +607,51 @@ return flatIdx + coordIdx[i]; } + /** + * @brief From the coordinate returns the 1D storage index of an element in the tensor. + * If the number of coordinates is inferior to the number of dimensions, + * the remaining coordinates are assumed to be 0. + * + * @param coordIdx Coordinate to an element in the tensor + * @return DimSize_t Storage index + */ + std::size_t getStorageIdx(const std::vector<std::size_t>& coordIdx) const { + AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates do not match number of dimensions"); + return std::inner_product(coordIdx.begin(), coordIdx.end(), mStrides.begin(), DimSize_t(0)); + } + + /** + * Returns a sub-tensor with one or more dimensions removed. + * For instance, t.extract({1}) on a CHW tensor will return the HW tensor + * of channel #1. + * Likewise, t.extract({0, 1}) on a NCHW tensor will return the HW tensor + * of batch #0 and channel #1. + * No memory copy is performed, the returned tensor does not own the memory. + * If the number of coordinates matches the number of dimensions, an empty + * tensor is returned. + * If the current tensor was contiguous, the returned tensor is guaranteed to be + * contiguous as well. + * + * @param coordIdx Coordinates of the sub-tensor to extract + * @return Tensor Sub-tensor. + */ + Tensor extract(const std::vector<std::size_t>& coordIdx) const; + + /** + * Returns a sub-tensor at some coordinates and with some dimensions. + * + * @param coordIdx First coordinates of the sub-tensor to extract + * @param dims Dimensions of the sub-tensor to extract + * @return Tensor Sub-tensor. + */ + Tensor extract(const std::vector<std::size_t>& coordIdx, const std::vector<std::size_t>& dims) const; + + /** + * Make the tensor's storage contiguous, if it is not already the case. + * If not contiguous, a new memory space is allocated. + */ + void makeContiguous(); + /** * Copy-cast data from a Tensor on the same device. 
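The storage index above is what makes extract() work: the returned sub-tensor shares the same implementation with a non-zero offset, so contiguous and storage indices may differ. A hedged sketch of the intended behaviour, assuming a small contiguous tensor on the cpu backend:

    Aidge::Tensor t = Aidge::Array3D<float, 2, 2, 2>{{{{0, 1}, {2, 3}}, {{4, 5}, {6, 7}}}};
    Aidge::Tensor v = t.extract({1});  // view on slice #1: dims {2, 2}, no data copy
    // v shares t's implementation, starting at element offset 4 of the storage
    float x = v.get<float>({1, 0});    // resolved through getStorageIdx({1, 0}) -> value 6
    v.makeContiguous();                // no-op here: the extracted view is already contiguous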
* If current tensor backend/device is set and is different from src, an * assertion is raised. * @param src Source tensor to copy-cast from. */ void copyCastFrom(const Tensor& src, std::shared_ptr<Tensor>& movedSrc); void copyCastFrom(const Tensor& src) { // Internal buffer will be allocated and deallocated at each call // (only if needed) std::shared_ptr<Tensor> movedSrc; copyCastFrom(src, movedSrc); } + /** + * Return a reference to a Tensor that is guaranteed to be contiguous: + * - itself, if already contiguous; + * - the provided Tensor, overwritten with the copied data. + * The data type, backend and device stay the same. + * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary. + * The shared_ptr does not need to be initialized. No new memory allocation + * will occur if fallback has already been allocated with the right + * type/size/device. + * @return Reference to either itself or to fallback. + */ + Tensor& refContiguous(std::shared_ptr<Tensor>& fallback); + const Tensor& refContiguous(std::shared_ptr<Tensor>& fallback) const; + /** * Return a reference to a Tensor casted to the desired data type: * - itself, if already at the right data type; @@ -642,6 +781,43 @@ return refCastFrom(fallback, targetReqs.dataType(), device.first, device.second); } + /** + * Return a reference to a Tensor with the desired data type and backend/device: + * - itself, if already with the right characteristics; + * - the provided Tensor, overwritten with the right characteristics. + * NOTE: no data is copy-casted. If it was so in a previous refCastFrom() on + * the same fallback, it remains valid; otherwise, data is invalid. + * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary. + * The shared_ptr does not need to be initialized. No new memory allocation + * will occur if fallback has already been allocated with the right + * type/size/device. + * @param dt The desired data type. + * @param backend The desired backend. + * @param device The desired device. + * @return Reference to either itself or to fallback. + */ + Tensor& ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device = 0); + const Tensor& ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device = 0) const; + + /** + * Return a reference to a Tensor with the same characteristics + * (data type, backend/device) as targetReqs Tensor: + * - itself, if already with the right characteristics; + * - the provided Tensor, overwritten with the right characteristics. + * NOTE: no data is copy-casted. If it was so in a previous refCastFrom() on + * the same fallback, it remains valid; otherwise, data is invalid. + * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary. + * The shared_ptr does not need to be initialized. No new memory allocation + * will occur if fallback has already been allocated with the right + * type/size/device. + * @param targetReqs Tensor with the desired target characteristics. + * @return Reference to either itself or to fallback. 
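All the ref*() helpers share the same fallback idiom: the caller keeps a reusable shared_ptr that is only filled when a conversion is actually required. A minimal sketch with the new refContiguous(); the wrapper function name is illustrative:

    void useContiguous(const Aidge::Tensor& input) {
        std::shared_ptr<Aidge::Tensor> fallback;  // only allocated if a copy is needed
        const Aidge::Tensor& contig = input.refContiguous(fallback);
        // contig is 'input' itself when it was already contiguous; otherwise it is
        // *fallback, freshly filled with a contiguous copy of the data.
        (void)contig;
    }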
+ */ + Tensor& ref(std::shared_ptr<Tensor>& fallback, const Tensor& targetReqs) { + const auto& device = targetReqs.getImpl()->device(); + return ref(fallback, targetReqs.dataType(), device.first, device.second); + } + private: ///\bug not protected against overflow void computeSize() { diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp index 20082eed28825ade9d62fb5d4e081840d3bd4442..f6647f99151304d0cf083aed109cc642c9f1ecc2 100644 --- a/include/aidge/operator/Gather.hpp +++ b/include/aidge/operator/Gather.hpp @@ -27,25 +27,26 @@ #include "aidge/utils/Types.h" namespace Aidge { -enum class GatherAttr { Axis }; +enum class GatherAttr { Indices, GatheredShape, Axis }; class Gather_Op : public OperatorTensor, public Registrable<Gather_Op, std::string, std::unique_ptr<OperatorImpl>(const Gather_Op&)>, - public StaticAttributes<GatherAttr, int> { + public StaticAttributes<GatherAttr, std::vector<std::int64_t>, std::vector<DimSize_t>, std::int64_t> { public: static const std::string Type; Gather_Op() = delete; - - using Attributes_ = StaticAttributes<GatherAttr, int>; + using Attributes_ = StaticAttributes<GatherAttr, std::vector<std::int64_t>, std::vector<DimSize_t>, std::int64_t>; template <GatherAttr e> using attr = typename Attributes_::template attr<e>; - Gather_Op(int axis) - : OperatorTensor(Type, 2, 0, 1), + Gather_Op(const std::vector<std::int64_t>& indices, const std::vector<DimSize_t>& gatheredShape, std::int64_t axis) + : OperatorTensor(Type, 1, 0, 1), Attributes_( + attr<GatherAttr::Indices>(indices), + attr<GatherAttr::GatheredShape>(gatheredShape), attr<GatherAttr::Axis>(axis)) {} @@ -76,21 +77,21 @@ public: } static const std::vector<std::string> getInputsName(){ - return {"data_input", "indexes"}; + return {"data_input"}; } static const std::vector<std::string> getOutputsName(){ return {"data_output"}; } }; -inline std::shared_ptr<Node> Gather(int axis = 0, const std::string& name = "") { - return std::make_shared<Node>(std::make_shared<Gather_Op>(axis), name); +inline std::shared_ptr<Node> Gather( const std::vector<std::int64_t>& indices, const std::vector<DimSize_t>& gatheredShape, std::int64_t axis = 0, const std::string& name = "") { + return std::make_shared<Node>(std::make_shared<Gather_Op>(indices, gatheredShape, axis), name); } } // namespace Aidge namespace { template <> -const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"Axis"}; +const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"Indices", "GatheredShape", "Axis"}; } #endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */ diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp index 12a7425f3339b7fbc0ae010639aacf23d97b0f5f..4a073bc525640846c28d718d09741a67d499830e 100644 --- a/include/aidge/operator/Slice.hpp +++ b/include/aidge/operator/Slice.hpp @@ -29,17 +29,17 @@ enum class SliceAttr { Starts, Ends, Axes }; class Slice_Op : public OperatorTensor, public Registrable<Slice_Op, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op &)>, - public StaticAttributes<SliceAttr, std::vector<std::int32_t>, std::vector<std::int32_t>, std::vector<std::int32_t>> { + public StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int64_t>> { public: static const std::string Type; Slice_Op() = delete; - using Attributes_ = StaticAttributes<SliceAttr, std::vector<std::int32_t>, std::vector<std::int32_t>, std::vector<std::int32_t>>; + using Attributes_ = StaticAttributes<SliceAttr, std::vector<std::int64_t>, 
std::vector<std::int64_t>, std::vector<std::int64_t>>; template <SliceAttr e> using attr = typename Attributes_::template attr<e>; - Slice_Op(const std::vector<std::int32_t>& starts, const std::vector<std::int32_t>& ends, const std::vector<std::int32_t>& axes) + Slice_Op(const std::vector<std::int64_t>& starts, const std::vector<std::int64_t>& ends, const std::vector<std::int64_t>& axes) : OperatorTensor(Type, 1, 0, 1), Attributes_(attr<SliceAttr::Starts>(starts), attr<SliceAttr::Ends>(ends), @@ -94,9 +94,9 @@ * @param name Name of the Operator. * @return std::shared_ptr<Node> A Node containing the Operator. */ -inline std::shared_ptr<Node> Slice(const std::vector<std::int32_t> starts, - const std::vector<std::int32_t> ends, - const std::vector<std::int32_t> axes, +inline std::shared_ptr<Node> Slice(const std::vector<std::int64_t> starts, + const std::vector<std::int64_t> ends, + const std::vector<std::int64_t> axes, const std::string &name = "") { // FIXME: properly handle default w&b initialization in every cases return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes), name); diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp index d3444000191022b575adaf1430319479daa5d4fc..927686cfd5cca910c5ffb25364ae4bc971ad18bf 100644 --- a/include/aidge/utils/Attributes.hpp +++ b/include/aidge/utils/Attributes.hpp @@ -69,6 +69,11 @@ public: * be agnostic from its return type. */ virtual py::object getAttrPy(const std::string& name) const = 0; + /* Bindable set function, does not require any templating. + * This is thanks to py::object which allows the function to + * be agnostic from ``value`` type. + */ + virtual void setAttrPy(const std::string& name, py::object&& value) = 0; #endif virtual ~Attributes() {} }; diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp index 2af8f47e9420f266cc6eca21f167944c761db7ea..44c3b1f5e8df833344fa9b7fe72bdb4ef1e0ec12 100644 --- a/include/aidge/utils/DynamicAttributes.hpp +++ b/include/aidge/utils/DynamicAttributes.hpp @@ -135,7 +135,7 @@ public: assert(res.second && "attribute already exists"); } - void setAttrPy(const std::string& name, py::object&& value) + void setAttrPy(const std::string& name, py::object&& value) override final { auto resPy = mAttrsPy.emplace(std::make_pair(name, value)); if (!resPy.second) @@ -204,7 +204,7 @@ private: // Stores C++ attributes (copy) and Python-only attributes // Code should be compiled with -fvisibility=hidden // See https://pybind11.readthedocs.io/en/stable/faq.html: - // “‘SomeClass’ declared with greater visibility than the type of its + // “‘SomeClass’ declared with greater visibility than the type of its // field ‘SomeClass::member’ [-Wattributes]” // This map will only be populated if Python interpreter is running std::map<std::string, py::object> mAttrsPy; diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp index a90a08b01915c461bc8951c08ee2dbd979b957de..be00932e47a93cc4349d39f6cad542cec506c38a 100644 --- a/include/aidge/utils/StaticAttributes.hpp +++ b/include/aidge/utils/StaticAttributes.hpp @@ -202,6 +202,22 @@ public: } #ifdef PYBIND + /** + * @brief Return a set of attributes defined. + * This method is used to automatically retrieve attributes in the documentation. + * This method is a duplicate of ``getAttrsName`` but static. 
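The static variant declared just below (staticGetAttrsName) lets the bindings list an operator's attributes without instantiating it, whereas getAttrsName() needs an instance. A hedged C++ sketch, only meaningful when built with PYBIND and using the Gather operator as redefined earlier in this patch:

    // Static: no instance required, e.g. to document the Python class itself.
    std::set<std::string> names = Aidge::Gather_Op::staticGetAttrsName();
    // Expected to contain "Indices", "GatheredShape" and "Axis" for Gather.

    // Instance-bound equivalent inherited from Attributes:
    Aidge::Gather_Op op({0, 2}, {2}, 0);
    std::set<std::string> sameNames = op.getAttrsName();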
+ * + * @return std::set<std::string> + */ + static std::set<std::string> staticGetAttrsName() { + std::set<std::string> attrsName; + for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) { + attrsName.insert(EnumStrings<ATTRS_ENUM>::data[i]); + } + return attrsName; + } + + py::object getAttrPy(const std::string& name) const override { for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) { if (name == EnumStrings<ATTRS_ENUM>::data[i]) { @@ -212,7 +228,22 @@ } AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"%s\" not found", name.c_str()); - }; + } + + + void setAttrPy(const std::string& name, py::object&& value) override final { + for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) { + if (name == EnumStrings<ATTRS_ENUM>::data[i]) { + // Cannot update attribute using reference as it would require templating + // Use a dirty workaround instead: cast the attribute tuple to a py::tuple, set the item, then cast back + auto tmpAttr = py::cast(mAttrs); + py::detail::accessor_policies::tuple_item::set(tmpAttr, static_cast<py::size_t>(i), value); + mAttrs = py::cast<std::tuple<T...>>(tmpAttr); + return; + } + } + AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"%s\" not found", name.c_str()); + } #endif private: diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp index 688a519e593dcde1fe69e3324c81163250eeb42b..9fbf08d0b782b6f39b2bef3d0b3ab918f6789ac0 100644 --- a/python_binding/data/pybind_Tensor.cpp +++ b/python_binding/data/pybind_Tensor.cpp @@ -30,25 +30,27 @@ void addCtor(py::class_<Tensor, Data, Registrable<Tensor, std::tuple<std::string, DataType>, - std::unique_ptr<TensorImpl>(const Tensor&)>>& mTensor){ - mTensor.def(py::init([]( py::array_t<T, py::array::c_style | py::array::forcecast> b) { + std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>>& mTensor){ + mTensor.def(py::init([]( + py::array_t<T, py::array::c_style | py::array::forcecast> b, + std::string backend = "cpu") { /* Request a buffer descriptor from Python */ py::buffer_info info = b.request(); Tensor* newTensor = new Tensor(); newTensor->setDataType(NativeType<T>::type); const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end()); newTensor->resize(dims); - // TODO : Find a better way to choose backend + std::set<std::string> availableBackends = Tensor::getAvailableBackends(); - if (availableBackends.find("cpu") != availableBackends.end()){ - newTensor->setBackend("cpu"); + if (availableBackends.find(backend) != availableBackends.end()){ + newTensor->setBackend(backend); newTensor->getImpl()->copyFromHost(static_cast<T*>(info.ptr), newTensor->size()); }else{ - printf("Warning : Could not use aidge_cpu backend, verify you have `import aidge_cpu`\n"); + AIDGE_THROW_OR_ABORT(py::value_error, "Could not find backend %s, verify you have `import aidge_backend_%s`.\n", backend.c_str(), backend.c_str()); } return newTensor; - })) + }), py::arg("array"), py::arg("backend")="cpu" .def("__setitem__", (void (Tensor::*)(std::size_t, T)) &Tensor::set) .def("__setitem__", (void (Tensor::*)(std::vector<std::size_t>, T)) &Tensor::set) ; @@ -58,16 +60,16 @@ void addCtor(py::class_<Tensor, void init_Tensor(py::module& m){ py::class_<Registrable<Tensor, std::tuple<std::string, DataType>, - std::unique_ptr<TensorImpl>(const Tensor&)>, + std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>, std::shared_ptr<Registrable<Tensor, std::tuple<std::string, DataType>, - std::unique_ptr<TensorImpl>(const Tensor&)>>>(m,"TensorRegistrable"); + std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t 
length)>>>(m,"TensorRegistrable"); py::class_<Tensor, std::shared_ptr<Tensor>, Data, Registrable<Tensor, std::tuple<std::string, DataType>, - std::unique_ptr<TensorImpl>(const Tensor&)>> pyClassTensor + std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>> pyClassTensor (m,"Tensor", py::multiple_inheritance(), py::buffer_protocol()); pyClassTensor.def(py::init<>()) @@ -76,7 +78,7 @@ void init_Tensor(py::module& m){ .def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims) .def("dtype", &Tensor::dataType) .def("size", &Tensor::size) - .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&)) &Tensor::resize) + .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize) .def("has_impl", &Tensor::hasImpl) .def("get_coord", &Tensor::getCoord) .def("get_idx", &Tensor::getIdx) @@ -118,7 +120,7 @@ void init_Tensor(py::module& m){ } }) .def_buffer([](Tensor& b) -> py::buffer_info { - const std::unique_ptr<TensorImpl>& tensorImpl = b.getImpl(); + const std::shared_ptr<TensorImpl>& tensorImpl = b.getImpl(); std::vector<size_t> dims; std::vector<size_t> strides; diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp index f87cd5dd66f44535ff895f73b160fc5988e1009a..dc586b7d947c6d8433fabe2fbfaa0990de5c132a 100644 --- a/python_binding/operator/pybind_AvgPooling.cpp +++ b/python_binding/operator/pybind_AvgPooling.cpp @@ -26,7 +26,7 @@ namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) { - py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, OperatorTensor, Attributes>( + py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>( m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance()) .def(py::init<const std::array<DimSize_t, DIM> &, @@ -34,7 +34,8 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) { py::arg("kernel_dims"), py::arg("stride_dims")) .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName) - .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName); + .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName) + .def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName); m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, const std::string& name, diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp index 411a2e1b6ae78065a79b92f25c23dac13e341997..c81c7ade4de50e6879fd32c59f6574b14c473398 100644 --- a/python_binding/operator/pybind_BatchNorm.cpp +++ b/python_binding/operator/pybind_BatchNorm.cpp @@ -21,9 +21,10 @@ namespace Aidge { template <DimSize_t DIM> void declare_BatchNormOp(py::module& m) { - py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, OperatorTensor, Attributes>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance()) + py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance()) .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName) - .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName); + .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName) + .def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName); m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, 
py::arg("nbFeatures"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = ""); } diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp index 2b7e5d6b99194e914e48dc6263d0bdcd6a4a8a2f..8cdd138b8cde2a582e9f569a17ae33811637092c 100644 --- a/python_binding/operator/pybind_Concat.cpp +++ b/python_binding/operator/pybind_Concat.cpp @@ -19,9 +19,10 @@ namespace py = pybind11; namespace Aidge { void init_Concat(py::module& m) { - py::class_<Concat_Op, std::shared_ptr<Concat_Op>, OperatorTensor, Attributes>(m, "ConcatOp", py::multiple_inheritance()) + py::class_<Concat_Op, std::shared_ptr<Concat_Op>, Attributes, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance()) .def("get_inputs_name", &Concat_Op::getInputsName) - .def("get_outputs_name", &Concat_Op::getOutputsName); + .def("get_outputs_name", &Concat_Op::getOutputsName) + .def("attributes_name", &Concat_Op::staticGetAttrsName); m.def("Concat", &Concat, py::arg("nbIn"), py::arg("axis"), py::arg("name") = ""); } diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp index 2200cd3fec1450011d6e0b5197f8b99b4dfeb4c3..455ea4024438b97b7ac6f07e5fc6722658b42ea4 100644 --- a/python_binding/operator/pybind_Conv.cpp +++ b/python_binding/operator/pybind_Conv.cpp @@ -24,7 +24,7 @@ namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> void declare_ConvOp(py::module &m) { - py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, OperatorTensor, Attributes>( + py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Attributes, OperatorTensor>( m, ("ConvOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance()) .def(py::init<DimSize_t, @@ -39,6 +39,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) { py::arg("dilation_dims")) .def("get_inputs_name", &Conv_Op<DIM>::getInputsName) .def("get_outputs_name", &Conv_Op<DIM>::getOutputsName) + .def("attributes_name", &Conv_Op<DIM>::staticGetAttrsName) ; m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels, diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp index 15f2c1c8acb4a1b59cfb0f35ebb78cb611647d3b..d858336b6578b580378778f64984ba565e28f941 100644 --- a/python_binding/operator/pybind_ConvDepthWise.cpp +++ b/python_binding/operator/pybind_ConvDepthWise.cpp @@ -26,7 +26,7 @@ namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) { - py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor, Attributes>( + py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>( m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance()) .def(py::init<const DimSize_t, @@ -38,7 +38,8 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) { py::arg("stride_dims"), py::arg("dilation_dims")) .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName) - .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName); + .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName) + .def("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName); m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels, const std::vector<DimSize_t>& kernel_dims, diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp index 
606b9ae948847f98d5a1129c08db21e073311879..ad589d73d0aea94d96e62e8065b70bd517633f88 100644 --- a/python_binding/operator/pybind_FC.cpp +++ b/python_binding/operator/pybind_FC.cpp @@ -20,9 +20,10 @@ namespace py = pybind11; namespace Aidge { void declare_FC(py::module &m) { - py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor, Attributes>(m, "FCOp", py::multiple_inheritance()) + py::class_<FC_Op, std::shared_ptr<FC_Op>, Attributes, OperatorTensor>(m, "FCOp", py::multiple_inheritance()) .def("get_inputs_name", &FC_Op::getInputsName) - .def("get_outputs_name", &FC_Op::getOutputsName); + .def("get_outputs_name", &FC_Op::getOutputsName) + .def("attributes_name", &FC_Op::staticGetAttrsName); m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = ""); } diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp index f9768e38fbdceef4a15cc74430bc2205bb32cb6a..f0d55e2f40bd89269c96564cea6b5a002b477b8b 100644 --- a/python_binding/operator/pybind_Gather.cpp +++ b/python_binding/operator/pybind_Gather.cpp @@ -19,10 +19,11 @@ namespace py = pybind11; namespace Aidge { void init_Gather(py::module& m) { - py::class_<Gather_Op, std::shared_ptr<Gather_Op>, OperatorTensor, Attributes>(m, "GatherOp", py::multiple_inheritance()) + py::class_<Gather_Op, std::shared_ptr<Gather_Op>, Attributes, OperatorTensor>(m, "GatherOp", py::multiple_inheritance()) .def("get_inputs_name", &Gather_Op::getInputsName) - .def("get_outputs_name", &Gather_Op::getOutputsName); + .def("get_outputs_name", &Gather_Op::getOutputsName) + .def("attributes_name", &Gather_Op::staticGetAttrsName); - m.def("Gather", &Gather, py::arg("axis"), py::arg("name") = ""); + m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis"), py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp index 154fdfa64f279d8d6bb40ea7077acdb4c0fd51b9..6be4f31acde5bac14595d06570d7a3158d398db8 100644 --- a/python_binding/operator/pybind_GenericOperator.cpp +++ b/python_binding/operator/pybind_GenericOperator.cpp @@ -21,13 +21,36 @@ namespace py = pybind11; namespace Aidge { void init_GenericOperator(py::module& m) { - py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, OperatorTensor, DynamicAttributes>(m, "GenericOperatorOp", + py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, DynamicAttributes, OperatorTensor>(m, "GenericOperatorOp", py::multiple_inheritance()) .def_readonly_static("identity", &GenericOperator_Op::Identity) .def("compute_output_dims", &GenericOperator_Op::computeOutputDims) .def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function")); - m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"), - py::arg("name") = ""); + // &GenericOperator + m.def("GenericOperator", + []( const std::string& type, + IOIndex_t nbData, + IOIndex_t nbParam, + IOIndex_t nbOut, + const std::string& name, + const py::kwargs kwargs){ + std::shared_ptr<Node> genericNode = GenericOperator( + type, + nbData, + nbParam, + nbOut, + name + ); + if (kwargs){ + std::shared_ptr<GenericOperator_Op> gop = std::static_pointer_cast<GenericOperator_Op>(genericNode->getOperator()); + for (auto item : kwargs) { + std::string key = py::cast<std::string>(item.first); + py::object value = 
py::reinterpret_borrow<py::object>(item.second); + gop->setAttrPy(key, std::move(value)); + } + } + return genericNode; + }, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"), py::arg("name") = ""); } } // namespace Aidge diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp index 07300633ad1fb8163d4456afd744c4eb5d7b0ed1..3e9acb831eb3334bd126d3b360f3b5aa39d83731 100644 --- a/python_binding/operator/pybind_LeakyReLU.cpp +++ b/python_binding/operator/pybind_LeakyReLU.cpp @@ -18,9 +18,10 @@ namespace py = pybind11; namespace Aidge { void init_LeakyReLU(py::module& m) { - py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, OperatorTensor, Attributes>(m, "LeakyReLUOp", py::multiple_inheritance()) + py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Attributes, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance()) .def("get_inputs_name", &LeakyReLU_Op::getInputsName) - .def("get_outputs_name", &LeakyReLU_Op::getOutputsName); + .def("get_outputs_name", &LeakyReLU_Op::getOutputsName) + .def("attributes_name", &LeakyReLU_Op::staticGetAttrsName); m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = ""); } diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp index d0d7f28d52a9a9899b08d37a0c1a4a8720f2ae20..92e4bc801a57fb08fc682b614e61205d0cb5e432 100644 --- a/python_binding/operator/pybind_Matmul.cpp +++ b/python_binding/operator/pybind_Matmul.cpp @@ -22,7 +22,8 @@ namespace Aidge { void init_MatMul(py::module &m) { py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance()) .def("get_inputs_name", &MatMul_Op::getInputsName) - .def("get_outputs_name", &MatMul_Op::getOutputsName); + .def("get_outputs_name", &MatMul_Op::getOutputsName) + .def("attributes_name", &MatMul_Op::staticGetAttrsName); m.def("MatMul", &MatMul, py::arg("name") = ""); } diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp index 0ee3d9df80d7ea7b7be2b8d5c456d5d739506882..485e0eaf6e6e68367ae9037fd922da07433a76e3 100644 --- a/python_binding/operator/pybind_MaxPooling.cpp +++ b/python_binding/operator/pybind_MaxPooling.cpp @@ -26,7 +26,7 @@ namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) { - py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor, Attributes>( + py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>( m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance()) .def(py::init<const std::array<DimSize_t, DIM> &, @@ -36,7 +36,8 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) { py::arg("stride_dims"), py::arg("ceil_mode")) .def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName) - .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName); + .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName) + .def("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName); m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, const std::string& name, diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp index 0956d6260e50d3be2418b1cf4089df87e442e54a..df3fdc297ce44cf96ff26bffb4cd96fa1fe8fe22 100644 --- a/python_binding/operator/pybind_Pad.cpp +++ b/python_binding/operator/pybind_Pad.cpp @@ -25,7 
+25,7 @@ namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> void declare_PadOp(py::module &m) { - py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Operator, Attributes>( + py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>( m, ("PadOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance()) .def(py::init<const std::array<DimSize_t, 2*DIM> &, @@ -36,6 +36,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) { py::arg("borderValue") = 0.0) .def("get_inputs_name", &Pad_Op<DIM>::getInputsName) .def("get_outputs_name", &Pad_Op<DIM>::getOutputsName) + .def("attributes_name", &Pad_Op<DIM>::staticGetAttrsName) ; m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples, diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp index 78d9ce3489a8309c42cc90189e588a448fd9649a..3caa438d18b3919dbedcf66e4ba53b92b84a50b5 100644 --- a/python_binding/operator/pybind_Producer.cpp +++ b/python_binding/operator/pybind_Producer.cpp @@ -30,13 +30,14 @@ void declare_Producer(py::module &m) { void init_Producer(py::module &m) { - py::class_<Producer_Op, std::shared_ptr<Producer_Op>, OperatorTensor, Attributes>( + py::class_<Producer_Op, std::shared_ptr<Producer_Op>, Attributes, OperatorTensor>( m, "ProducerOp", py::multiple_inheritance()) .def("dims", &Producer_Op::dims) .def("get_inputs_name", &Producer_Op::getInputsName) - .def("get_outputs_name", &Producer_Op::getOutputsName); + .def("get_outputs_name", &Producer_Op::getOutputsName) + .def("attributes_name", &Producer_Op::staticGetAttrsName); m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&, bool)>(&Producer), py::arg("tensor"), py::arg("name") = "", py::arg("constant") = false); declare_Producer<1>(m); diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp index e5de98b69adde5133dde302f7306bc8a5c471eef..1a50edba03f62e6c43ff60320fe4c3d5caa65f41 100644 --- a/python_binding/operator/pybind_ReduceMean.cpp +++ b/python_binding/operator/pybind_ReduceMean.cpp @@ -24,10 +24,11 @@ namespace py = pybind11; namespace Aidge { template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) { - py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, OperatorTensor, Attributes>( + py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, Attributes, OperatorTensor>( m, ("ReduceMeanOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance()) .def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName) .def("get_outputs_name", &ReduceMean_Op<DIM>::getOutputsName) + .def("attributes_name", &ReduceMean_Op<DIM>::staticGetAttrsName) ; m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), [](const std::vector<int>& axes, diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp index 04e92d39971a731931397e943aba6e296a81a14d..780cffdef695b71dbc2781ba30936b3b45657cbb 100644 --- a/python_binding/operator/pybind_Softmax.cpp +++ b/python_binding/operator/pybind_Softmax.cpp @@ -19,9 +19,10 @@ namespace py = pybind11; namespace Aidge { void init_Softmax(py::module& m) { - py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor, Attributes>(m, "SoftmaxOp", py::multiple_inheritance()) + py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Attributes, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance()) .def("get_inputs_name", 
&Softmax_Op::getInputsName) - .def("get_outputs_name", &Softmax_Op::getOutputsName); + .def("get_outputs_name", &Softmax_Op::getOutputsName) + .def("attributes_name", &Softmax_Op::staticGetAttrsName); m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = ""); } diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp index e92e9c2aaafe2d20220da053a2b9d799fbe8466d..d535a2c932c8d61c0395f03ffc0978caf7ad692f 100644 --- a/python_binding/operator/pybind_Transpose.cpp +++ b/python_binding/operator/pybind_Transpose.cpp @@ -25,12 +25,13 @@ namespace py = pybind11; namespace Aidge { -template <DimIdx_t DIM> +template <DimIdx_t DIM> void declare_Transpose(py::module &m) { - py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, OperatorTensor, Attributes>( + py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Attributes, OperatorTensor>( m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance()) .def("get_inputs_name", &Transpose_Op<DIM>::getInputsName) - .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName); + .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName) + .def("attributes_name", &Transpose_Op<DIM>::staticGetAttrsName); m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order, const std::string& name) { diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp index be0d357b7f73e26aad44994f407696f70617ad71..e57b06cc5014e7159f5a3e5927aedfefb996cae4 100644 --- a/python_binding/pybind_core.cpp +++ b/python_binding/pybind_core.cpp @@ -11,6 +11,9 @@ #include <pybind11/pybind11.h> +#include "aidge/backend/cpu/data/TensorImpl.hpp" // This include add Tensor + + namespace py = pybind11; namespace Aidge { diff --git a/python_binding/utils/pybind_Parameter.cpp b/python_binding/utils/pybind_Attributes.cpp similarity index 79% rename from python_binding/utils/pybind_Parameter.cpp rename to python_binding/utils/pybind_Attributes.cpp index 2957876f31ad0781a36905cef3a5ae88934b6a8a..bfce891176822a3b1c07b1ded0c46c9c94a43c0a 100644 --- a/python_binding/utils/pybind_Parameter.cpp +++ b/python_binding/utils/pybind_Attributes.cpp @@ -1,6 +1,7 @@ #include <pybind11/pybind11.h> #include "aidge/utils/Attributes.hpp" #include "aidge/utils/DynamicAttributes.hpp" +#include "aidge/utils/StaticAttributes.hpp" namespace py = pybind11; namespace Aidge { @@ -21,11 +22,13 @@ void init_Attributes(py::module& m){ .def("has_attr", &Attributes::hasAttr, py::arg("name")) .def("get_attr_type", &Attributes::getAttrType, py::arg("name")) .def("get_attrs_name", &Attributes::getAttrsName) - .def("get_attr", &Attributes::getAttrPy, py::arg("name")); + .def("get_attr", &Attributes::getAttrPy, py::arg("name")) + .def("__getattr__", &Attributes::getAttrPy, py::arg("name")) + .def("set_attr", &Attributes::setAttrPy, py::arg("name"), py::arg("value")) + .def("__setattr__", &Attributes::setAttrPy, py::arg("name"), py::arg("value")); py::class_<DynamicAttributes, std::shared_ptr<DynamicAttributes>, Attributes>(m, "DynamicAttributes") .def("add_attr", &DynamicAttributes::addAttrPy, py::arg("name"), py::arg("value")) - .def("set_attr", &DynamicAttributes::setAttrPy, py::arg("name"), py::arg("value")) .def("del_attr", &DynamicAttributes::delAttr, py::arg("name")); m.def("test_DynamicAttributes_binding", &test_DynamicAttributes_binding); diff --git a/src/backend/TensorImpl.cpp b/src/backend/TensorImpl.cpp index 
3982ee1fed9c9198b539bf9a28edd461992b791f..ee2f82a9cf847bfc6fe51e8d8b621e53a4c93cf4 100644 --- a/src/backend/TensorImpl.cpp +++ b/src/backend/TensorImpl.cpp @@ -14,23 +14,23 @@ #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" -void Aidge::TensorImpl::copyFrom(const TensorImpl& srcImpl, NbElts_t length) { - if (&srcImpl == this) { +void Aidge::TensorImpl::copyFrom(const TensorImpl& srcImpl, NbElts_t length, NbElts_t srcOffset, NbElts_t dstOffset) { + if (&srcImpl == this && srcOffset == dstOffset) { return; } if (srcImpl.device() != device()) { if (srcImpl.backend() == backend()) { // Same backend, but different device - copyFromDevice(srcImpl.rawPtr(), length, srcImpl.device()); + copyFromDevice(srcImpl.rawPtr(srcOffset), srcImpl.device(), length, dstOffset); } else if (srcImpl.hostPtr() != nullptr) { // Different backend, but input is valid on host - copyFromHost(srcImpl.hostPtr(), length); + copyFromHost(srcImpl.hostPtr(srcOffset), length, dstOffset); } else if (hostPtr() != nullptr) { // Different backend, but dst is valid on host - srcImpl.copyToHost(hostPtr(), length); + srcImpl.copyToHost(hostPtr(dstOffset), length, srcOffset); } else { // No direct link possible from src to dst device @@ -40,12 +40,12 @@ void Aidge::TensorImpl::copyFrom(const TensorImpl& srcImpl, NbElts_t length) { // - There is currently no concrete use case // - Just providing a pointer would be unsafe (risk of buffer overflow...) auto tmpHostBuffer = std::unique_ptr<char[]>(new char[scalarSize() * length]); - srcImpl.copyToHost(tmpHostBuffer.get(), length); - copyFromHost(tmpHostBuffer.get(), length); + srcImpl.copyToHost(tmpHostBuffer.get(), length, srcOffset); + copyFromHost(tmpHostBuffer.get(), length, dstOffset); } } else { // Same device: simple copy on device - copy(srcImpl.rawPtr(), length); + copy(srcImpl.rawPtr(srcOffset), length, dstOffset); } } diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp index da0c626d78dd1cc4452bfc07bf6c6a7f58b8d1e4..d45dee5639a6bc082871e1110657392fb97c15ec 100644 --- a/src/data/Tensor.cpp +++ b/src/data/Tensor.cpp @@ -13,11 +13,72 @@ #include "aidge/utils/Types.h" #include "aidge/utils/ErrorHandling.hpp" +Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx) const { + AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous"); + AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions"); + + Tensor subTensor(mDataType); + subTensor.resize(std::vector<size_t>(mDims.begin() + coordIdx.size(), mDims.end()), + std::vector<size_t>(mStrides.begin() + coordIdx.size(), mStrides.end())); + subTensor.setBackend(mImpl->backend(), mImpl->device().second); + subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx)); + return subTensor; +} + +Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx, const std::vector<std::size_t>& dims) const { + AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous"); + AIDGE_ASSERT(coordIdx.size() == mDims.size(), "Coordinates do not match number of dimensions"); + + Tensor subTensor(mDataType); + subTensor.resize(dims, mStrides); + subTensor.setBackend(mImpl->backend(), mImpl->device().second); + subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx)); + return subTensor; +} + +void Aidge::Tensor::makeContiguous() { + if (!mImpl || isContiguous()) { + return; + } + + // Block so that mImpl ref count is 1 for resize() + { + // Create a new storage that will be contiguous + std::shared_ptr<TensorImpl> newImpl = 
Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mSize); + // Copy elements from old to new storage + size_t idx = 0; + while (idx < mSize) { + const size_t storageIdx = getStorageIdx(getCoord(idx)); + + // Determine the size of the contiguous chunk + size_t copySize = 1; + while (idx + copySize < mSize && + getStorageIdx(getCoord(idx + copySize)) == storageIdx + copySize) + { + ++copySize; + } + + // Perform a single copy for the contiguous chunk + newImpl->copy(mImpl->rawPtr(mImplOffset + storageIdx), copySize, idx); + + // Move to the next index after the contiguous chunk + idx += copySize; + } + // Replace old storage by new, contiguous, storage + setImpl(newImpl); + } + + // Resize tensor without strides => tensor is now contiguous + resize(mDims); +} + void Aidge::Tensor::copyCast(const Tensor& src) { if (&src == this) { return; } + AIDGE_ASSERT(src.isContiguous(), "cannot copy-cast non-contiguous tensor"); + // Current Tensor has necessarily a data type, but may not have backend if (!getImpl()) { // If no backend was set for the current tensor, use the same as src @@ -27,7 +88,7 @@ void Aidge::Tensor::copyCast(const Tensor& src) { resize(src.dims()); AIDGE_ASSERT(src.getImpl()->device() == getImpl()->device(), "cannot copy-cast from a different backend/device"); - getImpl()->copyCast(src.getImpl()->rawPtr(), src.size(), src.dataType()); + getImpl()->copyCast(src.getImpl()->rawPtr(src.mImplOffset), src.dataType(), src.size(), mImplOffset); } void Aidge::Tensor::copyFrom(const Tensor& src) { @@ -35,6 +96,8 @@ void Aidge::Tensor::copyFrom(const Tensor& src) { return; } + AIDGE_ASSERT(src.isContiguous(), "cannot copy from non-contiguous tensor"); + // Current Tensor has necessarily a data type, but may not have backend if (!getImpl()) { // If no backend was set for the current tensor, use the same as src @@ -44,7 +107,7 @@ void Aidge::Tensor::copyFrom(const Tensor& src) { resize(src.dims()); AIDGE_ASSERT(src.dataType() == dataType(), "cannot copy from a different data type"); - getImpl()->copyFrom(*(src.getImpl()), src.size()); + getImpl()->copyFrom(*(src.getImpl()), src.size(), src.mImplOffset, mImplOffset); } void Aidge::Tensor::copyCastFrom(const Tensor& src, std::shared_ptr<Tensor>& movedSrcPtr) { @@ -52,6 +115,8 @@ void Aidge::Tensor::copyCastFrom(const Tensor& src, std::shared_ptr<Tensor>& mov return; } + AIDGE_ASSERT(src.isContiguous(), "cannot copy-cast from non-contiguous tensor"); + // Current Tensor has necessarily a data type, but may not have backend if (!getImpl()) { // If no backend was set for the current tensor, use the same as src @@ -65,12 +130,35 @@ void Aidge::Tensor::copyCastFrom(const Tensor& src, std::shared_ptr<Tensor>& mov const auto device = getImpl()->device(); const Tensor& movedSrc = src.refFrom(movedSrcPtr, device.first, device.second); // Second, copy-cast data (necessary) - getImpl()->copyCast(movedSrc.getImpl()->rawPtr(), movedSrc.size(), movedSrc.dataType()); + getImpl()->copyCast(movedSrc.getImpl()->rawPtr(movedSrc.mImplOffset), movedSrc.dataType(), movedSrc.size(), mImplOffset); } else { // Directly copy, no conversion necessary // Avoid making a double copy if both data type and device are the same - getImpl()->copyFrom(*(src.getImpl()), src.size()); + getImpl()->copyFrom(*(src.getImpl()), src.size(), src.mImplOffset, mImplOffset); + } +} + +Aidge::Tensor& Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) { + // Scott Meyers' solution to avoid code duplication + return 
const_cast<Tensor&>(static_cast<const Tensor&>(*this).refContiguous(fallback)); +} + +const Aidge::Tensor& Aidge::Tensor::refContiguous(std::shared_ptr<Tensor>& fallback) const { + AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot refContiguous() it"); + + if (isContiguous()) { + return *this; + } + else { + if (!fallback) { + // Allocate the fallback as a shallow copy of this Tensor + fallback = std::make_shared<Tensor>(*this); + } + else if (this != fallback.get()) { + // Shallow copy to fallback + *fallback = *this; + } + + // Make fallback contiguous + fallback->makeContiguous(); + return *fallback; + } } @@ -91,6 +179,8 @@ const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, c fallback->setDataType(dt); } else { + AIDGE_ASSERT(isContiguous(), "cannot refCast non-contiguous tensor"); + if (!fallback) { fallback = std::make_shared<Tensor>(dt); } @@ -101,7 +191,7 @@ const Aidge::Tensor& Aidge::Tensor::refCast(std::shared_ptr<Tensor>& fallback, c const auto device = getImpl()->device(); fallback->setBackend(device.first, device.second, false); // don't keep previous data (no copy) fallback->resize(dims()); - fallback->getImpl()->copyCast(getImpl()->rawPtr(), size(), dataType()); + fallback->getImpl()->copyCast(getImpl()->rawPtr(mImplOffset), dataType(), size(), fallback->mImplOffset); } return *fallback; } @@ -124,6 +214,8 @@ const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, c fallback->setBackend(backend, device); } else { + AIDGE_ASSERT(isContiguous(), "cannot refFrom non-contiguous tensor"); + if (!fallback) { fallback = std::make_shared<Tensor>(dataType()); } @@ -133,8 +225,34 @@ const Aidge::Tensor& Aidge::Tensor::refFrom(std::shared_ptr<Tensor>& fallback, c fallback->setBackend(backend, device, false); // don't keep previous data (no copy) fallback->resize(dims()); - fallback->getImpl()->copyFrom(*getImpl(), size()); + fallback->getImpl()->copyFrom(*getImpl(), size(), mImplOffset, fallback->mImplOffset); + } + return *fallback; + } +} + +Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device) { + // Scott Meyers' solution to avoid code duplication + return const_cast<Tensor&>(static_cast<const Tensor&>(*this).ref(fallback, dt, backend, device)); +} + +const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device) const { + AIDGE_ASSERT(getImpl(), "no backend was set for tensor, cannot ref() it"); + + if (dt == dataType() && std::make_pair(backend, device) == getImpl()->device()) { + return *this; + } + else { + // Change fallback type, backend & device, without any data copy + if (!fallback) { + fallback = std::make_shared<Tensor>(dt); } + else { + fallback->setDataType(dt, false); // don't keep previous data (no copy) + } + + fallback->setBackend(backend, device, false); // don't keep previous data (no copy) + fallback->resize(dims()); return *fallback; } } diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp index 30804994b6084a5a5558f106a38a6087e54471bc..b5f9d738a0280b3bacdb2ce201c8303b2b4d0a1f 100644 --- a/src/operator/Gather.cpp +++ b/src/operator/Gather.cpp @@ -9,8 +9,8 @@ * ********************************************************************************/ -#include <cassert> #include <cstddef> +#include <cstdint> #include <string> #include <vector> @@ -22,18 +22,26 @@ const std::string Aidge::Gather_Op::Type = "Gather"; void Aidge::Gather_Op::computeOutputDims() { // check inputs have been associated - if (!getInput(0) || !getInput(1)) { - 
AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected"); + if (!getInput(0)) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected"); } - if (getInput(1)->nbDims()!=2){ - AIDGE_THROW_OR_ABORT(std::runtime_error, "Indices input must be a 2D Tensor"); - } + if (!getInput(0)->empty()) { + std::vector<DimSize_t> outDims = getInput(0)->dims(); + const std::vector<DimSize_t> gatheredShape = this->template getAttr<GatherAttr::GatheredShape>(); + // TODO: check indices and gatheredShape + + const std::int64_t axisIdx = this->template getAttr<GatherAttr::Axis>() >= 0 ? + this->template getAttr<GatherAttr::Axis>() : + this->template getAttr<GatherAttr::Axis>() + outDims.size(); + outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx)); + if (!gatheredShape.empty()) + { + outDims.insert(outDims.cbegin() + static_cast<std::size_t>(axisIdx), + gatheredShape.cbegin(), + gatheredShape.cend()); + } - std::vector<DimSize_t> outDims = getInput(0)->dims(); - std::vector<DimSize_t> indexesDims = getInput(1)->dims(); - int axisIdx = this->template getAttr<GatherAttr::Axis>()>=0?this->template getAttr<GatherAttr::Axis>():this->template getAttr<GatherAttr::Axis>()+outDims.size(); - outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx)); - outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx), indexesDims.begin(),indexesDims.end()); - mOutputs[0]->resize(outDims); + mOutputs[0]->resize(outDims); + } } \ No newline at end of file diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp index c1a7c35e395418995a720efd49c7cfce0801863e..30b060cd2a58d7995a7447bd9b85b9bc0026a7f7 100644 --- a/src/operator/Reshape.cpp +++ b/src/operator/Reshape.cpp @@ -27,31 +27,32 @@ void Aidge::Reshape_Op::computeOutputDims() { AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected"); } - std::vector<DimSize_t> outDims; - - // variables to handle a negative dimension - bool foundNegativeDimension = false; - std::size_t outSize = 1; - DimIdx_t negativeIndex = 0; - - for(std::size_t i = 0; i < this->template getAttr<ReshapeAttr::Shape>().size(); ++i) - { - std::int64_t dimSize = this->template getAttr<ReshapeAttr::Shape>()[i]; - if (dimSize < 0) { - if (foundNegativeDimension) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator."); + if (!getInput(0)->empty()) { + std::vector<DimSize_t> outDims; + // variables to handle a negative dimension + bool foundNegativeDimension = false; + std::size_t outSize = 1; + DimIdx_t negativeIndex = 0; + + for(std::size_t i = 0; i < this->template getAttr<ReshapeAttr::Shape>().size(); ++i) + { + std::int64_t dimSize = this->template getAttr<ReshapeAttr::Shape>()[i]; + if (dimSize < 0) { + if (foundNegativeDimension) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator."); + } + foundNegativeDimension = true; + dimSize = 1; + negativeIndex = static_cast<DimIdx_t>(i); } - foundNegativeDimension = true; - dimSize = 1; - negativeIndex = static_cast<DimIdx_t>(i); + outDims.push_back(static_cast<DimSize_t>(dimSize)); + outSize *= static_cast<DimSize_t>(dimSize); } - outDims.push_back(static_cast<DimSize_t>(dimSize)); - outSize *= static_cast<DimSize_t>(dimSize); - } - if (foundNegativeDimension) { - outDims[negativeIndex] = (getInput(0) -> size()) / outSize; - } + if (foundNegativeDimension) { + outDims[negativeIndex] = (getInput(0) -> size()) / outSize; + } - mOutputs[0]->resize(outDims); + 
mOutputs[0]->resize(outDims); + } } \ No newline at end of file diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp index 139e84b561a48c2f6a5ecd14ed9d6905d66dec20..11d91a1fcd4c1d4ee6bcc5f9d830870fa6e732e5 100644 --- a/src/operator/Slice.cpp +++ b/src/operator/Slice.cpp @@ -30,21 +30,23 @@ void Aidge::Slice_Op::computeOutputDims() { AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor"); } - DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size(); + const DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size(); std::vector<DimSize_t> outDims = getInput(0)->dims(); for (std::size_t i = 0; i < nbAxes; ++i) { // For each slice operation get the params and cast them to size_t const std::int64_t axis_ = this->template getAttr<SliceAttr::Axes>()[i]; const std::int64_t start_ = this->template getAttr<SliceAttr::Starts>()[i]; const std::int64_t end_ = this->template getAttr<SliceAttr::Ends>()[i]; - const std::size_t axis = axis_ >= 0 ? static_cast<std::size_t>(axis_) : axis_ + getInput(0)->nbDims(); - const std::size_t start = start_ >= 0 ? static_cast<std::size_t>(start_) : start_ + getInput(0)->dims()[axis]; - const std::size_t end = end_ >= 0 ? static_cast<std::size_t>(end_) : end_ + getInput(0)->dims()[axis]; + const std::size_t axis = axis_ >= 0 ? static_cast<std::size_t>(axis_) : static_cast<std::size_t>(axis_) + getInput(0)->nbDims(); + const std::size_t start = start_ >= 0 ? static_cast<std::size_t>(start_) : static_cast<std::size_t>(start_) + getInput(0)->dims()[axis]; + const std::size_t end = end_ >= 0 ? static_cast<std::size_t>(end_) : static_cast<std::size_t>(end_) + getInput(0)->dims()[axis]; const std::size_t sliceLength = end - start + 1; // Check if slice length is valid if (sliceLength > getInput(0)->dims()[axis]) + { AIDGE_THROW_OR_ABORT(std::runtime_error, "ROI of Slice operator out of bounds"); + } outDims[axis] = sliceLength; } mOutputs[0]->resize(outDims); diff --git a/src/recipies/HorizontalTiling.cpp b/src/recipies/HorizontalTiling.cpp index 6cc34eba076934b884b336ce40081a855d917182..7d3fafc0a15d1b797fdfb1a2884b62d2d8d766c5 100644 --- a/src/recipies/HorizontalTiling.cpp +++ b/src/recipies/HorizontalTiling.cpp @@ -82,16 +82,16 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std: clonedInputs[1] -> addChild(newNode, 0, 1); clonedInputs[2] -> addChild(newNode, 0, 2); // Slice for input and each parameter - std::vector<std::int32_t> inputDimsEnd(inputDims[0].first.size()); + std::vector<std::int64_t> inputDimsEnd(inputDims[0].first.size()); for (std::size_t dim = 0; dim < inputDimsEnd.size(); ++dim) { - inputDimsEnd[dim] = static_cast<std::int32_t>(inputDims[0].first[dim] + inputDims[0].second[dim]) - 1; + inputDimsEnd[dim] = static_cast<std::int64_t>(inputDims[0].first[dim] + inputDims[0].second[dim]) - 1; } - std::vector<std::int32_t> inputDimsStart(inputDims[0].first.size()); + std::vector<std::int64_t> inputDimsStart(inputDims[0].first.size()); for (std::size_t dim = 0; dim < inputDimsStart.size(); ++dim) { - inputDimsStart[dim] = static_cast<std::int32_t>(inputDims[0].first[dim]); + inputDimsStart[dim] = static_cast<std::int64_t>(inputDims[0].first[dim]); } - std::vector<std::int32_t> usedDims(inputDimsEnd.size()); - std::iota(usedDims.begin(), usedDims.end(), static_cast<std::int32_t>(0)); + std::vector<std::int64_t> usedDims(inputDimsEnd.size()); + std::iota(usedDims.begin(), usedDims.end(), static_cast<std::int64_t>(0)); auto slice = Slice(inputDimsStart, inputDimsEnd, 
usedDims, "Slice_" + std::to_string(currentFirstDims[axis])); slice -> addChild(newNode, 0, 0); newNode -> addChild(concat, 0, i); diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cfcfb45e3735538c1650cfd990ea85e2333916ad --- /dev/null +++ b/unit_tests/data/Test_TensorImpl.cpp @@ -0,0 +1,100 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <array> + +#include <catch2/catch_test_macros.hpp> + +#include "aidge/data/Tensor.hpp" +#include "aidge/utils/TensorUtils.hpp" +#include "aidge/backend/cpu/data/TensorImpl.hpp" + +using namespace Aidge; + +TEST_CASE("Tensor creation") { + SECTION("from const array") { + Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; + + Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; + + Tensor xFloat = + Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}}; + + SECTION("Tensor features") { + REQUIRE(x.nbDims() == 3); + REQUIRE(x.dims()[0] == 2); + REQUIRE(x.dims()[1] == 2); + REQUIRE(x.dims()[2] == 2); + REQUIRE(x.size() == 8); + } + + SECTION("Access to array") { + REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1); + REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8); + } + + SECTION("get function") { + REQUIRE(x.get<int>({0, 0, 0}) == 1); + REQUIRE(x.get<int>({0, 0, 1}) == 2); + REQUIRE(x.get<int>({0, 1, 1}) == 4); + REQUIRE(x.get<int>({1, 1, 0}) == 7); + x.set<int>({1, 1, 1}, 36); + REQUIRE(x.get<int>({1, 1, 1}) == 36); + } + + SECTION("Pretty printing for debug") { REQUIRE_NOTHROW(x.print()); } + + SECTION("Tensor (in)equality") { + REQUIRE(x == xCopy); + REQUIRE_FALSE(x == xFloat); + } + } +} + +TEST_CASE("Tensor methods") { + Tensor x = Array3D<int, 2, 2, 2>{{ + {{1, 2}, + {3, 4}}, + {{5, 6}, + {7, 8}} + }}; + + Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}}; + + Tensor xFloat = + Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}}; + + SECTION("Tensor sharing") { + Tensor xCopyCtor(x); + REQUIRE(xCopyCtor.getImpl() == x.getImpl()); + + Tensor xEqOp = x; + REQUIRE(xEqOp.getImpl() == x.getImpl()); + + Tensor xCloned = x.clone(); + REQUIRE(xCloned.getImpl() != x.getImpl()); + REQUIRE(xCloned == x); + } + + SECTION("Tensor extract") { + Tensor y = x.extract({0, 1}); + REQUIRE(y.getImpl() == x.getImpl()); + REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}})); + REQUIRE(y.isContiguous()); + + Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1}); + REQUIRE(y2.getImpl() == x.getImpl()); + REQUIRE(!y2.isContiguous()); + Tensor y3 = y2.clone(); + REQUIRE(y3.isContiguous()); + REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}})); + } +}