Commit c131ada5 authored by Thibault Allenet

Merge dev into dataloader

Parents: 7fc2dd95 95077929
Showing with 419 additions and 60 deletions
@@ -125,6 +125,23 @@ class test_operator_binding(unittest.TestCase):
        generic_op.forward() # Increment idx
        self.assertEqual(customImpl.idx, 1)

    def test_magic_meth(self):
        myVar = 2
        myBool = True
        # Test dynamic attribute set
        gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", myVar=myVar).get_operator()
        gop.myBool = myBool
        # Test attribute set by kwargs
        self.assertEqual(gop.myVar, myVar)
        # Test dynamically set attribute
        self.assertEqual(gop.myBool, myBool)

        # Test static attribute set
        prod = aidge_core.Producer([1]).get_operator()
        self.assertEqual(prod.Constant, False)
        prod.Constant = True  # By default Constant is False
        self.assertEqual(prod.Constant, True)
if __name__ == '__main__':
......
@@ -10,16 +10,16 @@ SPDX-License-Identifier: EPL-2.0
import unittest
import aidge_core
from functools import reduce
import numpy as np


class test_tensor(unittest.TestCase):
    """Test tensor binding
    """
    def setUp(self):
        pass

    def tearDown(self):
        pass
@@ -35,10 +35,60 @@ class test_tensor(unittest.TestCase):
            idx = t.get_idx(coord)
            self.assertEqual(idx, i)

    def test_getavailable_backends(self):
        self.assertTrue("cpu" in aidge_core.Tensor.get_available_backends())

    def test_numpy_int_to_tensor(self):
        np_array = np.arange(9).reshape(1, 1, 3, 3).astype(np.int32)
        # Numpy -> Tensor
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.DataType.Int32)
        for i_t, i_n in zip(t, np_array.flatten()):
            self.assertTrue(i_t == i_n)
        for i, j in zip(t.dims(), np_array.shape):
            self.assertEqual(i, j)

    def test_tensor_int_to_numpy(self):
        np_array = np.arange(9).reshape(1, 1, 3, 3)
        # Numpy -> Tensor
        t = aidge_core.Tensor(np_array)
        # Tensor -> Numpy
        nnarray = np.array(t)
        for i_nn, i_n in zip(nnarray.flatten(), np_array.flatten()):
            self.assertTrue(i_nn == i_n)
        for i, j in zip(t.dims(), nnarray.shape):
            self.assertEqual(i, j)

    def test_numpy_int64_to_tensor(self):
        np_array = np.arange(9).reshape(1, 1, 3, 3).astype(np.int64)
        # Numpy -> Tensor
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.DataType.Int64)
        for i_t, i_n in zip(t, np_array.flatten()):
            self.assertTrue(i_t == i_n)
        for i, j in zip(t.dims(), np_array.shape):
            self.assertEqual(i, j)

    def test_numpy_float_to_tensor(self):
        np_array = np.random.rand(1, 1, 3, 3).astype(np.float32)
        # Numpy -> Tensor
        t = aidge_core.Tensor(np_array)
        self.assertEqual(t.dtype(), aidge_core.DataType.Float32)
        for i_t, i_n in zip(t, np_array.flatten()):
            self.assertTrue(i_t == i_n)  # TODO: compare with a tolerance rather than strict equality
        for i, j in zip(t.dims(), np_array.shape):
            self.assertEqual(i, j)

    def test_get_set(self):
        dims = [2, 2, 2]
        np_array = np.arange(8).reshape(dims).astype(np.int32)
        # Numpy -> Tensor
        t = aidge_core.Tensor(np_array)
        for i in range(8):
            self.assertEqual(t[i], i)
            t[i] = 5
            self.assertEqual(t[i], 5)


if __name__ == '__main__':
    unittest.main()
@@ -15,6 +15,10 @@
#include "aidge/backend/OperatorImpl.hpp"
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/backend/StimulusImpl.hpp"
#include "aidge/backend/cpu/data/TensorImpl.hpp"
#include "aidge/backend/cpu/data/GetCPUPtr.h"
#include "aidge/data/Data.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/Database.hpp"
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_DATA_GETCPUPTR_H_
#define AIDGE_CPU_DATA_GETCPUPTR_H_
#include "aidge/data/Tensor.hpp"
namespace Aidge {
inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) {
const auto tensor = std::static_pointer_cast<Tensor>(data);
return tensor->getImpl()->hostPtr(tensor->getImplOffset());
}
} // namespace Aidge
#endif // AIDGE_CPU_DATA_GETCPUPTR_H_
\ No newline at end of file
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#ifndef AIDGE_CPU_DATA_TENSORIMPL_H_
#define AIDGE_CPU_DATA_TENSORIMPL_H_
#include "aidge/backend/TensorImpl.hpp"
#include "aidge/data/Tensor.hpp"
#include "aidge/data/half.hpp"
#include "aidge/utils/Registrar.hpp"
#include "aidge/utils/Types.h"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/future_std/span.hpp"
namespace Aidge {
template <class T>
class TensorImpl_cpu : public TensorImpl {
private:
/// Pointer to the data and its capacity
future_std::span<T> mData;
/// If this instance owns the data, it is managed by this std::unique_ptr
std::unique_ptr<T[]> mDataOwner;
public:
static constexpr const char *Backend = "cpu";
TensorImpl_cpu(DeviceIdx_t device, NbElts_t length) : TensorImpl(Backend, device, length) {}
bool operator==(const TensorImpl &otherImpl) const override final {
const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts);
std::size_t i = 0;
for (; i < mNbElts &&
*(mData.data()+i) == *static_cast<const T*>(typedOtherImpl.rawPtr(i));
++i) {
}
return i == mNbElts;
}
static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, NbElts_t length) {
return std::make_shared<TensorImpl_cpu<T>>(device, length);
}
inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
const T* srcT = static_cast<const T *>(src);
T* dstT = static_cast<T *>(rawPtr(offset));
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported");
std::copy(srcT, srcT + length, dstT);
}
void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final {
if (length == 0) {
return;
}
T* dstT = static_cast<T *>(rawPtr(offset));
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
switch (srcDt)
{
case DataType::Float64:
std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
dstT);
break;
case DataType::Float32:
std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
dstT);
break;
case DataType::Float16:
std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
dstT);
break;
case DataType::Int64:
std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
dstT);
break;
case DataType::UInt64:
std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
dstT);
break;
case DataType::Int32:
std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
dstT);
break;
case DataType::UInt32:
std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
dstT);
break;
case DataType::Int16:
std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
dstT);
break;
case DataType::UInt16:
std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
dstT);
break;
case DataType::Int8:
std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
dstT);
break;
case DataType::UInt8:
std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
dstT);
break;
default:
AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
break;
}
}
void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
AIDGE_ASSERT(device.first == Backend, "backend must match");
AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
copy(src, length, offset);
}
inline void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
copy(src, length, offset);
}
void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
const T* src = static_cast<const T*>(rawPtr(offset));
AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
std::copy(src, src + length, static_cast<T *>(dst));
}
void *rawPtr(NbElts_t offset = 0) override final {
lazyInit();
return (mData.data() + offset);
};
const void *rawPtr(NbElts_t offset = 0) const override final {
AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const rawPtr");
return (mData.data() + offset);
};
void *hostPtr(NbElts_t offset = 0) override final {
lazyInit();
return (mData.data() + offset);
};
const void *hostPtr(NbElts_t offset = 0) const override final {
AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const hostPtr");
return (mData.data() + offset);
};
void setRawPtr(void *ptr, NbElts_t length) override final {
AIDGE_ASSERT(length >= mNbElts, "trying to set raw pointer of insufficient capacity");
mData = future_std::span<T>(static_cast<T *>(ptr), length);
mDataOwner.reset();
};
virtual ~TensorImpl_cpu() = default;
private:
void lazyInit() {
if (mData.size() < mNbElts) {
// Need more data, a re-allocation will occur
AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");
mDataOwner.reset(new T[mNbElts]);
mData = future_std::span<T>(mDataOwner.get(), mNbElts);
}
}
};
namespace {
static Registrar<Tensor> registrarTensorImpl_cpu_Float64(
{"cpu", DataType::Float64}, Aidge::TensorImpl_cpu<double>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Float32(
{"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Float16(
{"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
{"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
static Registrar<Tensor> registrarTensorImpl_cpu_Int64(
{"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<long>::create);
} // namespace
} // namespace Aidge
#endif /* AIDGE_CPU_DATA_TENSORIMPL_H_ */
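The anonymous-namespace registrars above are what make the cpu implementation discoverable at runtime: each entry maps a ("cpu", DataType) key to the matching TensorImpl_cpu<T>::create. From Python this surfaces through Tensor.get_available_backends(), as exercised by test_getavailable_backends earlier in this commit. A minimal sketch, assuming an aidge_core build that compiles these registrars in:

import aidge_core
import numpy as np

# "cpu" is listed because the Registrar<Tensor> entries above were
# instantiated at static-initialization time.
assert "cpu" in aidge_core.Tensor.get_available_backends()

# Building a Tensor from a numpy array selects the TensorImpl_cpu<T>
# registered for the array's dtype (here int32).
t = aidge_core.Tensor(np.arange(4).reshape(2, 2).astype(np.int32))
assert t.dtype() == aidge_core.DataType.Int32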
@@ -12,8 +12,10 @@
#ifndef AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
#define AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
#include <algorithm> // std::for_each
#include <array>
#include <cmath>
#include <cstdint> // std::int32_t
#include <numeric>
#include <vector>
@@ -31,18 +33,18 @@ enum class ReduceMeanAttr { Axes, KeepDims };
template <DimIdx_t DIM>
class ReduceMean_Op : public OperatorTensor,
                public Registrable<ReduceMean_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
                public StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t> {
public:
static const std::string Type;
ReduceMean_Op() = delete;
using Attributes_ = StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t>;
template <ReduceMeanAttr e>
using attr = typename Attributes_::template attr<e>;
constexpr ReduceMean_Op(const std::array<std::int32_t, DIM> &axes, DimSize_t keep_dims)
: OperatorTensor(Type, 1, 0, 1),
Attributes_(attr<ReduceMeanAttr::Axes>(axes),
attr<ReduceMeanAttr::KeepDims>(keep_dims)) {}
@@ -67,29 +69,28 @@ class ReduceMean_Op : public OperatorTensor,
}
void computeOutputDims() override final {
    if (!getInput(0)) {
        AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
    }
    if (!getInput(0)->empty()) {
        // make the Axes attribute positive
        std::array<std::int32_t, DIM>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
        std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
            if (val < 0)
                val += static_cast<std::int32_t>(getInput(0)->nbDims());
        });
        std::sort(axes.begin(), axes.end());

        // build output dimensions
        std::vector<DimSize_t> outDims = getInput(0)->dims();
        if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
            std::for_each(axes.begin(), axes.end(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
        }
        else {
            for (auto it = axes.crbegin(); it != axes.crend(); ++it)
                outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
        }

        if (outDims.size() > 0)
            mOutputs[0]->resize(outDims);
        else
@@ -111,7 +112,7 @@ class ReduceMean_Op : public OperatorTensor,
};
template <std::array<DimSize_t, 1>::size_type DIM>
inline std::shared_ptr<Node> ReduceMean(const std::array<std::int32_t, DIM> &axes,
DimSize_t keep_dims=1,
const std::string& name = "") {
// FIXME: properly handle default w&b initialization in every cases
@@ -123,7 +124,7 @@ inline std::shared_ptr<Node> ReduceMean(const std::array<int, DIM> &axes,
// helper with C-style array instead of std::array for axes to allow automatic template DIM deduction
template <DimSize_t DIM>
inline std::shared_ptr<Node> ReduceMean(
std::int32_t const (&axes)[DIM],
DimSize_t keep_dims = 1,
const std::string& name = "") {
static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
......
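The rewritten computeOutputDims above first maps negative axes onto positive indices, sorts them, then either pins each reduced dimension to 1 (KeepDims) or erases the reduced dimensions back-to-front so earlier indices stay valid. A plain-Python sketch of the same logic, for illustration only (not the aidge_core API):

def reduce_mean_out_dims(in_dims, axes, keep_dims=True):
    # Normalize negative axes, mirroring the std::for_each above.
    axes = sorted(a + len(in_dims) if a < 0 else a for a in axes)
    out = list(in_dims)
    if keep_dims:
        for a in axes:
            out[a] = 1   # reduced dims collapse to 1
    else:
        for a in reversed(axes):
            del out[a]   # erase back-to-front, as with crbegin()/crend()
    return out

assert reduce_mean_out_dims([2, 3, 4], [-1, 0], keep_dims=True) == [1, 3, 1]
assert reduce_mean_out_dims([2, 3, 4], [-1, 0], keep_dims=False) == [3]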
@@ -69,6 +69,11 @@
* be agnostic from its return type.
*/
virtual py::object getAttrPy(const std::string& name) const = 0;
/* Bindable set function, does not require any templating.
 * This is thanks to py::object which allows the function to
 * be agnostic of the type of ``value``.
 */
virtual void setAttrPy(const std::string& name, py::object&& value) = 0;
#endif
virtual ~Attributes() {}
};
......
@@ -135,7 +135,7 @@
assert(res.second && "attribute already exists");
}
void setAttrPy(const std::string& name, py::object&& value) override final
{
auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
if (!resPy.second)
@@ -204,7 +204,7 @@
// Stores C++ attributes (copy) and Python-only attributes
// Code should be compiled with -fvisibility=hidden
// See https://pybind11.readthedocs.io/en/stable/faq.html:
// “‘SomeClass’ declared with greater visibility than the type of its
// field ‘SomeClass::member’ [-Wattributes]”
// This map will only be populated if Python interpreter is running
std::map<std::string, py::object> mAttrsPy;
......
@@ -202,6 +202,22 @@
}
#ifdef PYBIND
/**
 * @brief Return the set of the names of the defined attributes.
 * This method is used to automatically retrieve attributes in the documentation.
 * It is a static duplicate of ``getAttrsName``.
*
* @return std::set<std::string>
*/
static std::set<std::string> staticGetAttrsName() {
std::set<std::string> attrsName;
for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
attrsName.insert(EnumStrings<ATTRS_ENUM>::data[i]);
}
return attrsName;
}
py::object getAttrPy(const std::string& name) const override {
for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
@@ -212,7 +228,22 @@
}
AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"%s\" not found", name.c_str());
}
void setAttrPy(const std::string& name, py::object&& value) override final{
for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
// Cannot update the attribute through a reference, as that would require templating.
// Workaround: cast the attribute tuple to py::object, set the item, then cast back.
auto tmpAttr = py::cast(mAttrs);
py::detail::accessor_policies::tuple_item::set(tmpAttr, static_cast<py::size_t>(i), value);
mAttrs = py::cast<std::tuple<T...>>(tmpAttr);
return;
}
}
AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"%s\" not found", name.c_str());
}
#endif
private:
......
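Together with staticGetAttrsName, the new setAttrPy override gives the Python side symmetric read/write access to static attributes; the tuple cast-and-set above is what ultimately backs plain attribute assignment from Python. A short sketch, mirroring test_magic_meth at the top of this commit:

import aidge_core

prod = aidge_core.Producer([1]).get_operator()
# Reads go through getAttrPy, writes through setAttrPy, which casts
# the py::object into the right slot of the attribute tuple.
assert prod.Constant == False   # default value
prod.Constant = True
assert prod.Constant == True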
@@ -31,24 +31,26 @@ void addCtor(py::class_<Tensor,
Registrable<Tensor,
std::tuple<std::string, DataType>,
std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>>& mTensor){
    mTensor.def(py::init([](
                py::array_t<T, py::array::c_style | py::array::forcecast> b,
                std::string backend = "cpu") {
            /* Request a buffer descriptor from Python */
            py::buffer_info info = b.request();
            Tensor* newTensor = new Tensor();
            newTensor->setDataType(NativeType<T>::type);
            const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end());
            newTensor->resize(dims);
            // TODO : Find a better way to choose backend
            std::set<std::string> availableBackends = Tensor::getAvailableBackends();
            if (availableBackends.find(backend) != availableBackends.end()) {
                newTensor->setBackend(backend);
                newTensor->getImpl()->copyFromHost(static_cast<T*>(info.ptr), newTensor->size());
            } else {
                AIDGE_THROW_OR_ABORT(py::value_error, "Could not find backend %s, verify you have `import aidge_backend_%s`.\n", backend.c_str(), backend.c_str());
            }
            return newTensor;
        }), py::arg("array"), py::arg("backend") = "cpu")
        .def("__setitem__", (void (Tensor::*)(std::size_t, T)) &Tensor::set)
        .def("__setitem__", (void (Tensor::*)(std::vector<std::size_t>, T)) &Tensor::set)
        ;
......
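With the extra backend argument, the numpy constructor is no longer hard-wired to "cpu", and asking for a backend that was never registered now raises a ValueError instead of printing a warning. A usage sketch; the default keeps the previous behaviour:

import numpy as np
import aidge_core

# Defaults to backend="cpu", as before.
t = aidge_core.Tensor(np.ones((2, 2), dtype=np.float32))

# Explicit backend; raises ValueError if the corresponding
# aidge_backend_* module was never imported/registered.
t2 = aidge_core.Tensor(np.ones((2, 2), dtype=np.float32), backend="cpu")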
@@ -26,7 +26,7 @@ namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>(
m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
py::multiple_inheritance())
.def(py::init<const std::array<DimSize_t, DIM> &,
@@ -34,7 +34,8 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
py::arg("kernel_dims"),
py::arg("stride_dims"))
.def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
.def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName);
.def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
.def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName);
m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
const std::string& name,
......
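Each operator binding touched in this commit gains the same attributes_name entry point (BatchNorm, Concat, Conv, ConvDepthWise, FC, Gather, LeakyReLU and MatMul below). Since staticGetAttrsName is static, it can be called on the class itself, which is how the documentation tooling is meant to discover attributes. A sketch, assuming the 2-D instantiation is exported as AvgPoolingOp2D (the exact attribute names may differ):

import aidge_core

# No instance needed: attributes_name is backed by a static method.
names = aidge_core.AvgPoolingOp2D.attributes_name()
print(names)   # e.g. a set like {'KernelDims', 'StrideDims'}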
@@ -21,9 +21,10 @@ namespace Aidge {
template <DimSize_t DIM>
void declare_BatchNormOp(py::module& m) {
py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
.def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
.def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName);
.def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
.def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nbFeatures"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
}
......
@@ -19,9 +19,10 @@ namespace py = pybind11;
namespace Aidge {
void init_Concat(py::module& m) {
py::class_<Concat_Op, std::shared_ptr<Concat_Op>, Attributes, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance())
.def("get_inputs_name", &Concat_Op::getInputsName)
.def("get_outputs_name", &Concat_Op::getOutputsName);
.def("get_outputs_name", &Concat_Op::getOutputsName)
.def("attributes_name", &Concat_Op::staticGetAttrsName);
m.def("Concat", &Concat, py::arg("nbIn"), py::arg("axis"), py::arg("name") = "");
}
......
@@ -24,7 +24,7 @@ namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Attributes, OperatorTensor>(
m, ("ConvOp" + std::to_string(DIM) + "D").c_str(),
py::multiple_inheritance())
.def(py::init<DimSize_t,
@@ -39,6 +39,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
py::arg("dilation_dims"))
.def("get_inputs_name", &Conv_Op<DIM>::getInputsName)
.def("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
.def("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
;
m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
......
@@ -26,7 +26,7 @@ namespace py = pybind11;
namespace Aidge {
template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
py::multiple_inheritance())
.def(py::init<const DimSize_t,
@@ -38,7 +38,8 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
py::arg("stride_dims"),
py::arg("dilation_dims"))
.def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
.def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName);
.def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
.def("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName);
m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
const std::vector<DimSize_t>& kernel_dims,
......
@@ -20,9 +20,10 @@ namespace py = pybind11;
namespace Aidge {
void declare_FC(py::module &m) {
py::class_<FC_Op, std::shared_ptr<FC_Op>, Attributes, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
.def("get_inputs_name", &FC_Op::getInputsName)
.def("get_outputs_name", &FC_Op::getOutputsName);
.def("get_outputs_name", &FC_Op::getOutputsName)
.def("attributes_name", &FC_Op::staticGetAttrsName);
m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
}
......
@@ -19,9 +19,10 @@ namespace py = pybind11;
namespace Aidge {
void init_Gather(py::module& m) {
py::class_<Gather_Op, std::shared_ptr<Gather_Op>, Attributes, OperatorTensor>(m, "GatherOp", py::multiple_inheritance())
.def("get_inputs_name", &Gather_Op::getInputsName)
.def("get_outputs_name", &Gather_Op::getOutputsName);
.def("get_outputs_name", &Gather_Op::getOutputsName)
.def("attributes_name", &Gather_Op::staticGetAttrsName);
m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis"), py::arg("name") = "");
}
......
@@ -21,13 +21,36 @@ namespace py = pybind11;
namespace Aidge {
void init_GenericOperator(py::module& m) {
py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, DynamicAttributes, OperatorTensor>(m, "GenericOperatorOp",
py::multiple_inheritance())
.def_readonly_static("identity", &GenericOperator_Op::Identity)
.def("compute_output_dims", &GenericOperator_Op::computeOutputDims)
.def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function"));
m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"),
py::arg("name") = "");
// &GenericOperator
m.def("GenericOperator",
[]( const std::string& type,
IOIndex_t nbData,
IOIndex_t nbParam,
IOIndex_t nbOut,
const std::string& name,
const py::kwargs kwargs){
std::shared_ptr<Node> genericNode = GenericOperator(
type,
nbData,
nbParam,
nbOut,
name
);
if (kwargs){
std::shared_ptr<GenericOperator_Op> gop = std::static_pointer_cast<GenericOperator_Op>(genericNode->getOperator());
for (auto item : kwargs) {
std::string key = py::cast<std::string>(item.first);
py::object value = py::reinterpret_borrow<py::object>(item.second);
gop->setAttrPy(key, std::move(value));
}
}
return genericNode;
}, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"), py::arg("name") = "");
}
} // namespace Aidge
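The lambda wrapper lets GenericOperator accept arbitrary keyword arguments and forwards each one through setAttrPy onto the operator's DynamicAttributes; this is exactly what test_magic_meth at the top of this commit exercises. A short sketch:

import aidge_core

# Extra kwargs become dynamic attributes on the underlying operator.
node = aidge_core.GenericOperator("MyOp", 1, 0, 1, "my_op", myVar=2)
gop = node.get_operator()
assert gop.myVar == 2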
@@ -18,9 +18,10 @@ namespace py = pybind11;
namespace Aidge {
void init_LeakyReLU(py::module& m) {
py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Attributes, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
.def("get_inputs_name", &LeakyReLU_Op::getInputsName)
.def("get_outputs_name", &LeakyReLU_Op::getOutputsName);
.def("get_outputs_name", &LeakyReLU_Op::getOutputsName)
.def("attributes_name", &LeakyReLU_Op::staticGetAttrsName);
m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
}
......
@@ -20,9 +20,10 @@ namespace py = pybind11;
namespace Aidge {
void declare_MatMul(py::module &m) {
py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Attributes, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
.def("get_inputs_name", &MatMul_Op::getInputsName)
.def("get_outputs_name", &MatMul_Op::getOutputsName);
.def("get_outputs_name", &MatMul_Op::getOutputsName)
.def("attributes_name", &MatMul_Op::staticGetAttrsName);
m.def("MatMul", &MatMul, py::arg("in_channels"), py::arg("out_channels"), py::arg("name") = "");
}
......