Compare revisions
Commits on Source (40)
Showing with 647 additions and 122 deletions
......@@ -125,6 +125,23 @@ class test_operator_binding(unittest.TestCase):
generic_op.forward() # Increment idx
self.assertEqual(customImpl.idx, 1)
def test_magic_meth(self):
myVar = 2
myBool = True
# Test dynamic attribute set
gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", myVar=myVar).get_operator()
gop.myBool = myBool
# Test variable set by kwargs
self.assertEqual(gop.myVar, myVar)
# Test set attr
self.assertEqual(gop.myBool, myBool)
# Test static attribute set
prod = aidge_core.Producer([1]).get_operator()
self.assertEqual(prod.Constant, False)  # Constant is False by default
prod.Constant = True
self.assertEqual(prod.Constant, True)
if __name__ == '__main__':
......
......@@ -10,16 +10,16 @@ SPDX-License-Identifier: EPL-2.0
import unittest
import aidge_core
from functools import reduce
import numpy as np
class test_tensor(unittest.TestCase):
"""
"""Test tensor binding
"""
def setUp(self):
pass
def tearDown(self):
pass
......@@ -35,10 +35,60 @@ class test_tensor(unittest.TestCase):
idx = t.get_idx(coord)
self.assertEqual(idx, i)
if __name__ == '__main__':
unittest.main()
def test_getavailable_backends(self):
self.assertTrue("cpu" in aidge_core.Tensor.get_available_backends())
def test_numpy_int_to_tensor(self):
np_array = np.arange(9).reshape(1,1,3,3).astype(np.int32)
# Numpy -> Tensor
t = aidge_core.Tensor(np_array)
self.assertEqual(t.dtype(), aidge_core.DataType.Int32)
for i_t, i_n in zip(t, np_array.flatten()):
self.assertTrue(i_t == i_n)
for i,j in zip(t.dims(), np_array.shape):
self.assertEqual(i,j)
def test_tensor_int_to_numpy(self):
np_array = np.arange(9).reshape(1,1,3,3)
# Numpy -> Tensor
t = aidge_core.Tensor(np_array)
# Tensor -> Numpy
nnarray = np.array(t)
for i_nn, i_n in zip(nnarray.flatten(), np_array.flatten()):
self.assertTrue(i_nn == i_n)
for i,j in zip(t.dims(), nnarray.shape):
self.assertEqual(i,j)
def test_numpy_int64_to_tensor(self):
np_array = np.arange(9).reshape(1,1,3,3).astype(np.int64)
# Numpy -> Tensor
t = aidge_core.Tensor(np_array)
self.assertEqual(t.dtype(), aidge_core.DataType.Int64)
for i_t, i_n in zip(t, np_array.flatten()):
self.assertTrue(i_t == i_n)
for i,j in zip(t.dims(), np_array.shape):
self.assertEqual(i,j)
def test_numpy_float_to_tensor(self):
t = aidge_core.Tensor()
np_array = np.random.rand(1, 1, 3, 3).astype(np.float32)
# Numpy -> Tensor
t = aidge_core.Tensor(np_array)
self.assertEqual(t.dtype(), aidge_core.DataType.Float32)
for i_t, i_n in zip(t, np_array.flatten()):
self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference
for i,j in zip(t.dims(), np_array.shape):
self.assertEqual(i,j)
def test_get_set(self):
dims = [2,2,2]
np_array = np.arange(8).reshape(dims).astype(np.int32)
# Numpy -> Tensor
t = aidge_core.Tensor(np_array)
for i in range(8):
self.assertEqual(t[i], i)
t[i] = 5
self.assertEqual(t[i], 5)
if __name__ == '__main__':
unittest.main()
......@@ -15,6 +15,9 @@
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
......
......@@ -67,19 +67,13 @@ private:
class TensorImpl {
public:
TensorImpl() = delete;
TensorImpl(const char *backend, DeviceIdx_t device = 0) : mBackend(backend), mDevice(device){};
TensorImpl(const char *backend, DeviceIdx_t device, NbElts_t length) : mBackend(backend), mDevice(device), mNbElts(length) {};
/**
* Return the (backend, device) pair for this implementation.
*/
std::pair<std::string, DeviceIdx_t> device() const { return std::make_pair(mBackend, mDevice); }
/**
* Set the device ID for current backend.
* @param device New device ID on current backend.
*/
virtual void setDevice(DeviceIdx_t device) = 0;
/**
* Copy data from the same device.
* @param src Pointer on current implementation device.
......@@ -93,30 +87,34 @@ public:
* @param srcDt Source data type.
* @param src Pointer on current implementation device.
* @param length Number of elements to copy.
* @param offset Destination offset (in number of elements).
*/
virtual void copyCast(const void *src, NbElts_t length, const DataType srcDt) = 0;
virtual void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) = 0;
/**
* Copy data from an other device on the same backend.
* @param device (backend, device) pair to copy from. The backend must match current implementation backend.
* @param src Pointer on current implementation backend.
* @param length Number of elements to copy.
* @param offset Destination offset (in number of elements).
*/
virtual void copyFromDevice(const void *src, NbElts_t length, const std::pair<std::string, DeviceIdx_t>& device) = 0;
virtual void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) = 0;
/**
* Copy data from host.
* @param src Host pointer to copy from.
* @param length Number of elements to copy.
* @param offset Destination offset (in number of elements).
*/
virtual void copyFromHost(const void *src, NbElts_t length) = 0;
virtual void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) = 0;
/**
* Copy data to host.
* @param dst Host pointer to copy to.
* @param length Number of elements to copy.
* @param offset Source offset (in number of elements).
*/
virtual void copyToHost(void *dst, NbElts_t length) const = 0;
virtual void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const = 0;
/**
* Return the raw device pointer.
......@@ -146,8 +144,22 @@ public:
AIDGE_THROW_OR_ABORT(std::runtime_error, "Cannot set raw pointer for backend %s", mBackend);
};
virtual std::size_t size() const = 0; // Storage size
virtual std::size_t scalarSize() const = 0; // Size of one scalar (in bytes)
/**
* Set the size, in number of elements, that must be stored.
*/
void resize(NbElts_t length) {
mNbElts = length;
}
/**
* Return the number of elements stored.
*/
inline std::size_t size() const noexcept { return mNbElts; }
/**
* Return the size (in bytes) of one element (scalar).
*/
virtual std::size_t scalarSize() const noexcept = 0;
constexpr const char *backend() const { return mBackend; }
virtual ~TensorImpl() = default;
virtual bool operator==(const TensorImpl &othImpl) const = 0;
......@@ -156,12 +168,16 @@ public:
* Copy from another backend.
* @param srcImpl Source TensorImpl to copy from.
* @param length Number of elements of size scalarSize() to copy
* @param srcOffset Source offset (in number of elements).
* @param dstOffset Destination offset (in number of elements).
*/
void copyFrom(const TensorImpl& srcImpl, NbElts_t length);
void copyFrom(const TensorImpl& srcImpl, NbElts_t length, NbElts_t srcOffset = 0, NbElts_t dstOffset = 0);
protected:
const char *mBackend;
DeviceIdx_t mDevice;
const DeviceIdx_t mDevice;
/// Number of elements (to be) stored
NbElts_t mNbElts;
};
} // namespace Aidge
......
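The copy family now takes an element offset, so a tensor can be filled or read back in chunks within a single allocation. A minimal sketch of the new signatures, assuming the CPU implementation defined further below (the function name offsetCopyExample is hypothetical):

#include "aidge/backend/cpu/data/TensorImpl.hpp"

void offsetCopyExample() {
    // Device 0, room for 8 elements (allocation happens lazily on first access).
    Aidge::TensorImpl_cpu<float> impl(0, 8);
    const float firstHalf[4]  = {0.f, 1.f, 2.f, 3.f};
    const float secondHalf[4] = {4.f, 5.f, 6.f, 7.f};
    impl.copyFromHost(firstHalf, 4);                // fills elements [0, 4)
    impl.copyFromHost(secondHalf, 4, /*offset=*/4); // fills elements [4, 8)
    float out[4];
    impl.copyToHost(out, 4, /*offset=*/4);          // reads back 4.f ... 7.f
}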
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_DATA_GETCPUPTR_H_
#define AIDGE_CPU_DATA_GETCPUPTR_H_
#include "aidge/data/Tensor.hpp"
namespace Aidge {
inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) {
const auto tensor = std::static_pointer_cast<Tensor>(data);
return tensor->getImpl()->hostPtr(tensor->getImplOffset());
}
} // namespace Aidge
#endif // AIDGE_CPU_DATA_GETCPUPTR_H_
\ No newline at end of file
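As a usage sketch, this helper gives backend kernels direct host access to a tensor's storage, with getImplOffset() accounting for tensors that are views into a shared buffer. The setup below is hypothetical (names cpuPtrExample, t, and data are invented); the calls mirror those used in the Python binding code further down:

#include <memory>

#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/data/Tensor.hpp"

void cpuPtrExample() {
    auto t = std::make_shared<Aidge::Tensor>();
    t->setDataType(Aidge::DataType::Float32);
    t->resize({2, 2});
    t->setBackend("cpu");   // attaches the (lazily allocated) CPU implementation
    float* data = static_cast<float*>(Aidge::getCPUPtr(t));
    data[0] = 1.0f;         // direct host access to the tensor's storage
}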
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_DATA_TENSORIMPL_H_
#define AIDGE_CPU_DATA_TENSORIMPL_H_
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/half.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/future_std/span.hpp"
namespace Aidge {
template <class T>
class TensorImpl_cpu : public TensorImpl {
private:
/// Pointer to the data and its capacity
future_std::span<T> mData;
/// If this instance owns the data, std::unique_ptr manages it
std::unique_ptr<T[]> mDataOwner;
public:
static constexpr const char *Backend = "cpu";
TensorImpl_cpu(DeviceIdx_t device, NbElts_t length) : TensorImpl(Backend, device, length) {}
bool operator==(const TensorImpl &otherImpl) const override final {
const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts);
std::size_t i = 0;
for (; i < mNbElts &&
*(mData.data()+i) == *static_cast<const T*>(typedOtherImpl.rawPtr(i));
++i) {
}
return i == mNbElts;
}
static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, NbElts_t length) {
return std::make_shared<TensorImpl_cpu<T>>(device, length);
}
inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
const T* srcT = static_cast<const T *>(src);
T* dstT = static_cast<T *>(rawPtr(offset));
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported");
std::copy(srcT, srcT + length, dstT);
}
void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final {
if (length == 0) {
return;
}
T* dstT = static_cast<T *>(rawPtr(offset));
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
switch (srcDt)
{
case DataType::Float64:
std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
dstT);
break;
case DataType::Float32:
std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
dstT);
break;
case DataType::Float16:
std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
dstT);
break;
case DataType::Int64:
std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
dstT);
break;
case DataType::UInt64:
std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
dstT);
break;
case DataType::Int32:
std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
dstT);
break;
case DataType::UInt32:
std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
dstT);
break;
case DataType::Int16:
std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
dstT);
break;
case DataType::UInt16:
std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
dstT);
break;
case DataType::Int8:
std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
dstT);
break;
case DataType::UInt8:
std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
dstT);
break;
default:
AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
break;
}
}
void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
AIDGE_ASSERT(device.first == Backend, "backend must match");
AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
copy(src, length, offset);
}
inline void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
copy(src, length, offset);
}
void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
const T* src = static_cast<const T*>(rawPtr(offset));
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
std::copy(src, src + length, static_cast<T *>(dst));
}
void *rawPtr(NbElts_t offset = 0) override final {
lazyInit();
return (mData.data() + offset);
};
const void *rawPtr(NbElts_t offset = 0) const override final {
AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const rawPtr");
return (mData.data() + offset);
};
void *hostPtr(NbElts_t offset = 0) override final {
lazyInit();
return (mData.data() + offset);
};
const void *hostPtr(NbElts_t offset = 0) const override final {
AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const hostPtr");
return (mData.data() + offset);
};
void setRawPtr(void *ptr, NbElts_t length) override final {
AIDGE_ASSERT(length >= mNbElts, "trying to set raw pointer of insufficient capacity");
mData = future_std::span<T>(static_cast<T *>(ptr), length);
mDataOwner.reset();
};
virtual ~TensorImpl_cpu() = default;
private:
void lazyInit() {
if (mData.size() < mNbElts) {
// Need more data, a re-allocation will occur
AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");
mDataOwner.reset(new T[mNbElts]);
mData = future_std::span<T>(mDataOwner.get(), mNbElts);
}
}
};
namespace {
static Registrar<Tensor> registrarTensorImpl_cpu_Float64(
{"cpu", DataType::Float64}, Aidge::TensorImpl_cpu<double>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Float32(
{"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Float16(
{"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
{"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int64(
{"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<long>::create);
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_DATA_TENSORIMPL_H_ */
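Two ownership details above are easy to miss: setRawPtr() turns the implementation into a non-owning view over external memory (mDataOwner is reset), and lazyInit() refuses to reallocate memory it does not own. A hypothetical sketch:

void externalBufferExample() {
    float buffer[6] = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f};
    Aidge::TensorImpl_cpu<float> impl(0, 6);
    impl.setRawPtr(buffer, 6);  // view over 'buffer'; impl does not own it
    impl.resize(8);             // declaring more elements than the view holds...
    // impl.rawPtr();           // ...would now trigger the
                                // "trying to enlarge non-owned data" assert
}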
......@@ -27,25 +27,26 @@
#include "aidge/utils/Types.h"
namespace Aidge {
enum class GatherAttr { Axis };
enum class GatherAttr { Indices, GatheredShape, Axis };
class Gather_Op : public OperatorTensor,
public Registrable<Gather_Op,
std::string,
std::unique_ptr<OperatorImpl>(const Gather_Op&)>,
public StaticAttributes<GatherAttr, int> {
public StaticAttributes<GatherAttr, std::vector<std::int64_t>, std::vector<DimSize_t>, std::int64_t> {
public:
static const std::string Type;
Gather_Op() = delete;
using Attributes_ = StaticAttributes<GatherAttr, int>;
using Attributes_ = StaticAttributes<GatherAttr, std::vector<std::int64_t>, std::vector<DimSize_t>, std::int64_t>;
template <GatherAttr e> using attr = typename Attributes_::template attr<e>;
Gather_Op(int axis)
: OperatorTensor(Type, 2, 0, 1),
Gather_Op(const std::vector<std::int64_t>& indices, const std::vector<DimSize_t>& gatheredShape, std::int64_t axis)
: OperatorTensor(Type, 1, 0, 1),
Attributes_(
attr<GatherAttr::Indices>(indices),
attr<GatherAttr::GatheredShape>(gatheredShape),
attr<GatherAttr::Axis>(axis))
{}
......@@ -76,21 +77,21 @@ public:
}
static const std::vector<std::string> getInputsName(){
return {"data_input", "indexes"};
return {"data_input"};
}
static const std::vector<std::string> getOutputsName(){
return {"data_output"};
}
};
inline std::shared_ptr<Node> Gather(int axis = 0, const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Gather_Op>(axis), name);
inline std::shared_ptr<Node> Gather(const std::vector<std::int64_t>& indices, const std::vector<DimSize_t>& gatheredShape, std::int64_t axis = 0, const std::string& name = "") {
return std::make_shared<Node>(std::make_shared<Gather_Op>(indices, gatheredShape, axis), name);
}
} // namespace Aidge
namespace {
template <>
const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"Axis"};
const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"Indices", "GatheredShape", "Axis"};
}
#endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
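With this change the indices become a static attribute instead of a second data input (the operator drops from 2 inputs to 1), so a Gather node is fully specified at construction. A sketch of the new factory, with hypothetical values:

#include "aidge/operator/Gather.hpp"  // assumed header path

// Gather entries 0 and 2 along axis 1; the gathered shape here is {2}.
std::shared_ptr<Aidge::Node> g =
    Aidge::Gather(/*indices=*/{0, 2},
                  /*gatheredShape=*/{2},
                  /*axis=*/1,
                  "my_gather");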
......@@ -29,17 +29,17 @@ enum class SliceAttr { Starts, Ends, Axes };
class Slice_Op
: public OperatorTensor,
public Registrable<Slice_Op, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op &)>,
public StaticAttributes<SliceAttr, std::vector<std::int32_t>, std::vector<std::int32_t>, std::vector<std::int32_t>> {
public StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int64_t>> {
public:
static const std::string Type;
Slice_Op() = delete;
using Attributes_ = StaticAttributes<SliceAttr, std::vector<std::int32_t>, std::vector<std::int32_t>, std::vector<std::int32_t>>;
using Attributes_ = StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int64_t>>;
template <SliceAttr e>
using attr = typename Attributes_::template attr<e>;
Slice_Op(const std::vector<std::int32_t>& starts, const std::vector<std::int32_t>& ends, const std::vector<std::int32_t>& axes)
Slice_Op(const std::vector<std::int64_t>& starts, const std::vector<std::int64_t>& ends, const std::vector<std::int64_t>& axes)
: OperatorTensor(Type, 1, 0, 1),
Attributes_(attr<SliceAttr::Starts>(starts),
attr<SliceAttr::Ends>(ends),
......@@ -94,9 +94,9 @@ public:
* @param name Name of the Operator.
* @return std::shared_ptr<Node> A Node containing the Operator.
*/
inline std::shared_ptr<Node> Slice(const std::vector<std::int32_t> starts,
const std::vector<std::int32_t> ends,
const std::vector<std::int32_t> axes,
inline std::shared_ptr<Node> Slice(const std::vector<std::int64_t> starts,
const std::vector<std::int64_t> ends,
const std::vector<std::int64_t> axes,
const std::string &name = "") {
// FIXME: properly handle default w&b initialization in every cases
return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes), name);
......
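The Slice attributes widen from std::int32_t to std::int64_t, matching the int64 convention ONNX uses for starts/ends/axes. Construction is otherwise unchanged; a sketch with hypothetical bounds:

#include "aidge/operator/Slice.hpp"  // assumed header path

// Keep rows [0, 2) and columns [1, 3) of a 2-D input (ends assumed exclusive).
std::shared_ptr<Aidge::Node> s =
    Aidge::Slice(/*starts=*/{0, 1},
                 /*ends=*/{2, 3},
                 /*axes=*/{0, 1},
                 "my_slice");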
......@@ -69,6 +69,11 @@ public:
* be agnostic from its return type.
*/
virtual py::object getAttrPy(const std::string& name) const = 0;
/* Bindable set function, does not require any templating.
* This is thanks to py::object which allows the function to
* be agnostic from ``value`` type.
*/
virtual void setAttrPy(const std::string& name, py::object&& value) = 0;
#endif
virtual ~Attributes() {}
};
......
......@@ -135,7 +135,7 @@ public:
assert(res.second && "attribute already exists");
}
void setAttrPy(const std::string& name, py::object&& value)
void setAttrPy(const std::string& name, py::object&& value) override final
{
auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
if (!resPy.second)
......@@ -204,7 +204,7 @@ private:
// Stores C++ attributes (copy) and Python-only attributes
// Code should be compiled with -fvisibility=hidden
// See https://pybind11.readthedocs.io/en/stable/faq.html:
// “‘SomeClass’ declared with greater visibility than the type of its
// field ‘SomeClass::member’ [-Wattributes]”
// This map will only be populated if Python interpreter is running
std::map<std::string, py::object> mAttrsPy;
......
......@@ -202,6 +202,22 @@ public:
}
#ifdef PYBIND
/**
* @brief Return the set of defined attribute names.
* This method is used to automatically retrieve attributes in the documentation.
* It duplicates ``getAttrsName`` but is static.
*
* @return std::set<std::string>
*/
static std::set<std::string> staticGetAttrsName() {
std::set<std::string> attrsName;
for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
attrsName.insert(EnumStrings<ATTRS_ENUM>::data[i]);
}
return attrsName;
}
py::object getAttrPy(const std::string& name) const override {
for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
......@@ -212,7 +228,22 @@ public:
}
AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"%s\" not found", name.c_str());
};
}
void setAttrPy(const std::string& name, py::object&& value) override final{
for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
// Cannot update the attribute through a reference, as that would require templating
// Workaround: cast the attributes tuple to a py::object, set the item, cast back
auto tmpAttr = py::cast(mAttrs);
py::detail::accessor_policies::tuple_item::set(tmpAttr, static_cast<py::size_t>(i), value);
mAttrs = py::cast<std::tuple<T...>>(tmpAttr);
return;
}
}
AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"%s\" not found", name.c_str());
}
#endif
private:
......
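staticGetAttrsName() exists so the bindings can expose an operator's attribute names without an instance; the binding changes below register it as attributes_name on each operator class. From C++ it could be used as follows (sketch only; it requires a PYBIND build since the method lives under #ifdef PYBIND, and the Conv header path is assumed):

#include <iostream>
#include <string>
#include "aidge/operator/Conv.hpp"  // assumed header path

void listConvAttrs() {
    for (const std::string& name : Aidge::Conv_Op<2>::staticGetAttrsName()) {
        std::cout << name << '\n';  // prints the EnumStrings entries for ConvAttr
    }
}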
......@@ -30,25 +30,27 @@ void addCtor(py::class_<Tensor,
Data,
Registrable<Tensor,
std::tuple<std::string, DataType>,
std::unique_ptr<TensorImpl>(const Tensor&)>>& mTensor){
mTensor.def(py::init([]( py::array_t<T, py::array::c_style | py::array::forcecast> b) {
std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>>& mTensor){
mTensor.def(py::init([](
py::array_t<T, py::array::c_style | py::array::forcecast> b,
std::string backend = "cpu") {
/* Request a buffer descriptor from Python */
py::buffer_info info = b.request();
Tensor* newTensor = new Tensor();
newTensor->setDataType(NativeType<T>::type);
const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end());
newTensor->resize(dims);
// TODO : Find a better way to choose backend
std::set<std::string> availableBackends = Tensor::getAvailableBackends();
if (availableBackends.find("cpu") != availableBackends.end()){
newTensor->setBackend("cpu");
if (availableBackends.find(backend) != availableBackends.end()){
newTensor->setBackend(backend);
newTensor->getImpl()->copyFromHost(static_cast<T*>(info.ptr), newTensor->size());
}else{
printf("Warning : Could not use aidge_cpu backend, verify you have `import aidge_cpu`\n");
AIDGE_THROW_OR_ABORT(py::value_error, "Could not find backend %s, verify you have `import aidge_backend_%s`.\n", backend.c_str(), backend.c_str());
}
return newTensor;
}))
}), py::arg("array"), py::arg("backend")="cpu")
.def("__setitem__", (void (Tensor::*)(std::size_t, T)) &Tensor::set)
.def("__setitem__", (void (Tensor::*)(std::vector<std::size_t>, T)) &Tensor::set)
;
......@@ -58,16 +60,16 @@ void addCtor(py::class_<Tensor,
void init_Tensor(py::module& m){
py::class_<Registrable<Tensor,
std::tuple<std::string, DataType>,
std::unique_ptr<TensorImpl>(const Tensor&)>,
std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>,
std::shared_ptr<Registrable<Tensor,
std::tuple<std::string, DataType>,
std::unique_ptr<TensorImpl>(const Tensor&)>>>(m,"TensorRegistrable");
std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>>>(m,"TensorRegistrable");
py::class_<Tensor, std::shared_ptr<Tensor>,
Data,
Registrable<Tensor,
std::tuple<std::string, DataType>,
std::unique_ptr<TensorImpl>(const Tensor&)>> pyClassTensor
std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>> pyClassTensor
(m,"Tensor", py::multiple_inheritance(), py::buffer_protocol());
pyClassTensor.def(py::init<>())
......@@ -76,7 +78,7 @@ void init_Tensor(py::module& m){
.def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims)
.def("dtype", &Tensor::dataType)
.def("size", &Tensor::size)
.def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&)) &Tensor::resize)
.def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize)
.def("has_impl", &Tensor::hasImpl)
.def("get_coord", &Tensor::getCoord)
.def("get_idx", &Tensor::getIdx)
......@@ -118,7 +120,7 @@ void init_Tensor(py::module& m){
}
})
.def_buffer([](Tensor& b) -> py::buffer_info {
const std::unique_ptr<TensorImpl>& tensorImpl = b.getImpl();
const std::shared_ptr<TensorImpl>& tensorImpl = b.getImpl();
std::vector<size_t> dims;
std::vector<size_t> strides;
......
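The registrar's creator signature changes from std::unique_ptr<TensorImpl>(const Tensor&) to std::shared_ptr<TensorImpl>(DeviceIdx_t, NbElts_t), so an implementation can now be built from just a device and an element count. Assuming Registrar exposes the usual create(key) accessor (not shown in this diff), instantiation might look like:

// Hypothetical: fetch the cpu/Float32 creator and build a 16-element impl.
auto creator = Aidge::Registrar<Aidge::Tensor>::create(
    {"cpu", Aidge::DataType::Float32});
std::shared_ptr<Aidge::TensorImpl> impl = creator(/*device=*/0, /*length=*/16);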
......@@ -26,7 +26,7 @@ namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, OperatorTensor, Attributes>(
py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>(
m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
py::multiple_inheritance())
.def(py::init<const std::array<DimSize_t, DIM> &,
......@@ -34,7 +34,8 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
py::arg("kernel_dims"),
py::arg("stride_dims"))
.def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
.def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName);
.def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
.def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName);
m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
......
......@@ -21,9 +21,10 @@ namespace Aidge {
template <DimSize_t DIM>
void declare_BatchNormOp(py::module& m) {
py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, OperatorTensor, Attributes>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
.def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
.def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName);
.def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
.def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nbFeatures"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
}
......
......@@ -19,9 +19,10 @@ namespace py = pybind11;
namespace Aidge {
void init_Concat(py::module& m) {
py::class_<Concat_Op, std::shared_ptr<Concat_Op>, OperatorTensor, Attributes>(m, "ConcatOp", py::multiple_inheritance())
py::class_<Concat_Op, std::shared_ptr<Concat_Op>, Attributes, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance())
.def("get_inputs_name", &Concat_Op::getInputsName)
.def("get_outputs_name", &Concat_Op::getOutputsName);
.def("get_outputs_name", &Concat_Op::getOutputsName)
.def("attributes_name", &Concat_Op::staticGetAttrsName);
m.def("Concat", &Concat, py::arg("nbIn"), py::arg("axis"), py::arg("name") = "");
}
......
......@@ -24,7 +24,7 @@ namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, OperatorTensor, Attributes>(
py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Attributes, OperatorTensor>(
m, ("ConvOp" + std::to_string(DIM) + "D").c_str(),
py::multiple_inheritance())
.def(py::init<DimSize_t,
......@@ -39,6 +39,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
py::arg("dilation_dims"))
.def("get_inputs_name", &Conv_Op<DIM>::getInputsName)
.def("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
.def("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
;
m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
......
......@@ -26,7 +26,7 @@ namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor, Attributes>(
py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
py::multiple_inheritance())
.def(py::init<const DimSize_t,
......@@ -38,7 +38,8 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
py::arg("stride_dims"),
py::arg("dilation_dims"))
.def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
.def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName);
.def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
.def("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName);
m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
const std::vector<DimSize_t>& kernel_dims,
......
......@@ -20,9 +20,10 @@ namespace py = pybind11;
namespace Aidge {
void declare_FC(py::module &m) {
py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor, Attributes>(m, "FCOp", py::multiple_inheritance())
py::class_<FC_Op, std::shared_ptr<FC_Op>, Attributes, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
.def("get_inputs_name", &FC_Op::getInputsName)
.def("get_outputs_name", &FC_Op::getOutputsName);
.def("get_outputs_name", &FC_Op::getOutputsName)
.def("attributes_name", &FC_Op::staticGetAttrsName);
m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
}
......
......@@ -19,10 +19,11 @@ namespace py = pybind11;
namespace Aidge {
void init_Gather(py::module& m) {
py::class_<Gather_Op, std::shared_ptr<Gather_Op>, OperatorTensor, Attributes>(m, "GatherOp", py::multiple_inheritance())
py::class_<Gather_Op, std::shared_ptr<Gather_Op>, Attributes, OperatorTensor>(m, "GatherOp", py::multiple_inheritance())
.def("get_inputs_name", &Gather_Op::getInputsName)
.def("get_outputs_name", &Gather_Op::getOutputsName);
.def("get_outputs_name", &Gather_Op::getOutputsName)
.def("attributes_name", &Gather_Op::staticGetAttrsName);
m.def("Gather", &Gather, py::arg("axis"), py::arg("name") = "");
m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis"), py::arg("name") = "");
}
} // namespace Aidge