Skip to content
Snippets Groups Projects
Commit c05db685 authored by Maxence Naud's avatar Maxence Naud Committed by Maxence Naud
Browse files

fix wrong python binding

parent d79e4eeb
No related branches found
No related tags found
3 merge requests!414Update version 0.5.1 -> 0.6.0,!408[Add] Dropout Operator,!361Move code from header to source
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <memory> #include <memory>
#include <string> #include <string>
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Operator.hpp" #include "aidge/operator/Operator.hpp"
#include "aidge/utils/Registrar.hpp" #include "aidge/utils/Registrar.hpp"
......
...@@ -117,12 +117,12 @@ class Tensor : public Data, ...@@ -117,12 +117,12 @@ class Tensor : public Data,
template <typename T, std::size_t SIZE_0> template <typename T, std::size_t SIZE_0>
constexpr Tensor(Array1D<T, SIZE_0> &&arr) constexpr Tensor(Array1D<T, SIZE_0> &&arr)
: Data(Type), : Data(Type),
mDataType(NativeType_v<T>), mDataType(NativeType_v<T>),
mDataFormat(DataFormat::Default), mDataFormat(DataFormat::Default),
mDims({SIZE_0}), mDims({SIZE_0}),
mStrides({1}), mStrides({1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0})), mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0})),
mSize(SIZE_0) mSize(SIZE_0)
{ {
mImpl->copyFromHost(&arr.data[0], SIZE_0); mImpl->copyFromHost(&arr.data[0], SIZE_0);
} }
...@@ -141,8 +141,7 @@ class Tensor : public Data, ...@@ -141,8 +141,7 @@ class Tensor : public Data,
mDims({SIZE_0, SIZE_1}), mDims({SIZE_0, SIZE_1}),
mStrides({SIZE_1, 1}), mStrides({SIZE_1, 1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1})), mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1})),
mSize(SIZE_0 * SIZE_1) mSize(SIZE_0 * SIZE_1) {
{
mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1); mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
} }
...@@ -156,13 +155,12 @@ class Tensor : public Data, ...@@ -156,13 +155,12 @@ class Tensor : public Data,
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2> template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
constexpr Tensor(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) constexpr Tensor(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr)
: Data(Type), : Data(Type),
mDataType(NativeType_v<T>), mDataType(NativeType_v<T>),
mDataFormat(DataFormat::Default), mDataFormat(DataFormat::Default),
mDims({SIZE_0, SIZE_1, SIZE_2}), mDims({SIZE_0, SIZE_1, SIZE_2}),
mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}), mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1, SIZE_2})), mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1, SIZE_2})),
mSize(SIZE_0 * SIZE_1 * SIZE_2) mSize(SIZE_0 * SIZE_1 * SIZE_2) {
{
mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2); mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
} }
...@@ -177,13 +175,12 @@ class Tensor : public Data, ...@@ -177,13 +175,12 @@ class Tensor : public Data,
template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3> template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
constexpr Tensor(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) constexpr Tensor(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr)
: Data(Type), : Data(Type),
mDataType(NativeType_v<T>), mDataType(NativeType_v<T>),
mDataFormat(DataFormat::Default), mDataFormat(DataFormat::Default),
mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}), mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}), mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}),
mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})), mImpl(Registrar<Tensor>::create({"cpu", NativeType_v<T>})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3) mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3) {
{
mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3); mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
} }
......
...@@ -82,7 +82,7 @@ public: ...@@ -82,7 +82,7 @@ public:
} }
}; };
// Factory declaration for the And operator node. The commit drops `inline`
// because the definition moved from this header into a source file (per the
// "Move code from header to source" MR); a plain declaration avoids an
// inline-without-definition ODR hazard.
std::shared_ptr<Node> And(const std::string& name = "");
} // namespace Aidge } // namespace Aidge
......
...@@ -25,7 +25,7 @@ public: ...@@ -25,7 +25,7 @@ public:
void init_DynamicAnalysis(py::module& m){ void init_DynamicAnalysis(py::module& m){
py::class_<DynamicAnalysis, std::shared_ptr<DynamicAnalysis>, pyDynamicAnalysis>(m, "DynamicAnalysis", py::multiple_inheritance(), py::dynamic_attr()) py::class_<DynamicAnalysis, std::shared_ptr<DynamicAnalysis>, pyDynamicAnalysis>(m, "DynamicAnalysis", py::multiple_inheritance(), py::dynamic_attr())
.def(py::init<std::shared_ptr<GraphView>>(), py::arg("graph")) .def(py::init<const Scheduler&>(), py::arg("scheduler"))
.def("get_nb_arithm_ops", &DynamicAnalysis::getNbArithmOps) .def("get_nb_arithm_ops", &DynamicAnalysis::getNbArithmOps)
.def("get_nb_logic_ops", &DynamicAnalysis::getNbLogicOps) .def("get_nb_logic_ops", &DynamicAnalysis::getNbLogicOps)
.def("get_nb_comp_ops", &DynamicAnalysis::getNbCompOps) .def("get_nb_comp_ops", &DynamicAnalysis::getNbCompOps)
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment