Skip to content
Snippets Groups Projects
Commit a09e120a authored by Maxence Naud's avatar Maxence Naud
Browse files

- Fix node_export call to operator.attr

- Fix datatype call
- Change 'DataType' to 'dtype' for Python interface
parent 44a64216
No related branches found
No related tags found
2 merge requests!152Update Aidge export to take a graph view as an argument instead of a...,!145Improve UI for Operator/Node/GraphView/Tensor
Pipeline #49469 passed
from aidge_core.aidge_export_aidge.utils import operator_register
from aidge_core.aidge_export_aidge import ROOT_EXPORT
from aidge_core import dtype, ExportNode, generate_file, generate_str
import numpy as np
from pathlib import Path

# Convert an aidge dtype enum value to the matching C++ scalar type name,
# used when emitting C++ source for exported nodes.
# NOTE(review): float16 maps to half_float::half — the export templates
# presumably include the half_float library; confirm against the export headers.
datatype_converter = {
    dtype.float64 : "double",
    dtype.float32 : "float",
    dtype.float16 : "half_float::half",
    dtype.int8    : "int8_t",
    dtype.int16   : "int16_t",
    dtype.int32   : "int32_t",
    dtype.int64   : "int64_t",
    dtype.uint8   : "uint8_t",
    dtype.uint16  : "uint16_t",
    dtype.uint32  : "uint32_t",
    dtype.uint64  : "uint64_t",
}
......
...@@ -20,7 +20,7 @@ class ExportNode(ABC): ...@@ -20,7 +20,7 @@ class ExportNode(ABC):
self.node = aidge_node self.node = aidge_node
self.operator = aidge_node.get_operator() self.operator = aidge_node.get_operator()
self.name = self.node.name() self.name = self.node.name()
self.attributes = self.operator.attr.dict() # Attributes are auto fetched from aidge operators self.attributes = self.operator.attr.dict() if self.operator.attr is not None else {} # Attributes are auto fetched from aidge operators
# rename is_leaf ? # rename is_leaf ?
self.is_last = len(self.node.get_children()) == 0 self.is_last = len(self.node.get_children()) == 0
......
...@@ -42,7 +42,7 @@ class test_tensor(unittest.TestCase): ...@@ -42,7 +42,7 @@ class test_tensor(unittest.TestCase):
np_array = np.arange(9).reshape(1,1,3,3).astype(np.int32) np_array = np.arange(9).reshape(1,1,3,3).astype(np.int32)
# Numpy -> Tensor # Numpy -> Tensor
t = aidge_core.Tensor(np_array) t = aidge_core.Tensor(np_array)
self.assertEqual(t.dtype(), aidge_core.DataType.int32) self.assertEqual(t.dtype(), aidge_core.dtype.int32)
for i_t, i_n in zip(t, np_array.flatten()): for i_t, i_n in zip(t, np_array.flatten()):
self.assertTrue(i_t == i_n) self.assertTrue(i_t == i_n)
for i,j in zip(t.dims(), np_array.shape): for i,j in zip(t.dims(), np_array.shape):
...@@ -62,7 +62,7 @@ class test_tensor(unittest.TestCase): ...@@ -62,7 +62,7 @@ class test_tensor(unittest.TestCase):
np_array = np.arange(9).reshape(1,1,3,3).astype(np.int64) np_array = np.arange(9).reshape(1,1,3,3).astype(np.int64)
# Numpy -> Tensor # Numpy -> Tensor
t = aidge_core.Tensor(np_array) t = aidge_core.Tensor(np_array)
self.assertEqual(t.dtype(), aidge_core.DataType.int64) self.assertEqual(t.dtype(), aidge_core.dtype.int64)
for i_t, i_n in zip(t, np_array.flatten()): for i_t, i_n in zip(t, np_array.flatten()):
self.assertTrue(i_t == i_n) self.assertTrue(i_t == i_n)
for i,j in zip(t.dims(), np_array.shape): for i,j in zip(t.dims(), np_array.shape):
...@@ -73,7 +73,7 @@ class test_tensor(unittest.TestCase): ...@@ -73,7 +73,7 @@ class test_tensor(unittest.TestCase):
np_array = np.random.rand(1, 1, 3, 3).astype(np.float32) np_array = np.random.rand(1, 1, 3, 3).astype(np.float32)
# Numpy -> Tensor # Numpy -> Tensor
t = aidge_core.Tensor(np_array) t = aidge_core.Tensor(np_array)
self.assertEqual(t.dtype(), aidge_core.DataType.float32) self.assertEqual(t.dtype(), aidge_core.dtype.float32)
for i_t, i_n in zip(t, np_array.flatten()): for i_t, i_n in zip(t, np_array.flatten()):
self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference
for i,j in zip(t.dims(), np_array.shape): for i,j in zip(t.dims(), np_array.shape):
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
********************************************************************************/ ********************************************************************************/
#include <pybind11/pybind11.h> #include <pybind11/pybind11.h>
#include "aidge/data/Data.hpp" #include "aidge/data/Data.hpp"
namespace py = pybind11; namespace py = pybind11;
...@@ -17,7 +18,7 @@ namespace Aidge { ...@@ -17,7 +18,7 @@ namespace Aidge {
void init_Data(py::module& m){ void init_Data(py::module& m){
// TODO : extend with more values ! // TODO : extend with more values !
py::enum_<DataType>(m, "DataType") py::enum_<DataType>(m, "dtype")
.value("float64", DataType::Float64) .value("float64", DataType::Float64)
.value("float32", DataType::Float32) .value("float32", DataType::Float32)
.value("float16", DataType::Float16) .value("float16", DataType::Float16)
...@@ -25,7 +26,7 @@ void init_Data(py::module& m){ ...@@ -25,7 +26,7 @@ void init_Data(py::module& m){
.value("int16", DataType::Int16) .value("int16", DataType::Int16)
.value("int32", DataType::Int32) .value("int32", DataType::Int32)
.value("int64", DataType::Int64) .value("int64", DataType::Int64)
.value("int8", DataType::Int8) .value("uint8", DataType::UInt8)
.value("uint16", DataType::UInt16) .value("uint16", DataType::UInt16)
.value("uint32", DataType::UInt32) .value("uint32", DataType::UInt32)
.value("uint64", DataType::UInt64) .value("uint64", DataType::UInt64)
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment