diff --git a/aidge_core/show_graphview.py b/aidge_core/show_graphview.py
index 633298f10dbfdafe40022f88f741f82d2d35c681..4f6a2960348c44dc7b8a0b957f777ddac5a8562a 100644
--- a/aidge_core/show_graphview.py
+++ b/aidge_core/show_graphview.py
@@ -79,29 +79,32 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e
             if parents[0] is None:
                 parents.append(parents.pop(0))
             else:
                 pass
-
+
             parents_inputs = []
-            for parent in parents:
+            input_idx = 0
+            for parent in node.get_parents():
                 if parent is not None:
-                    for output_idx in range(parent.get_operator().nb_outputs()):
-                        for input_idx in range(node.get_operator().nb_inputs()):
-                            if parent.get_operator().get_output(output_idx).dims() == node.get_operator().get_input(input_idx).dims():
+                    for children in parent.outputs():
+                        for child in children:
+                            if child[0] == node and child[1] == input_idx:
                                 parents_inputs.append((parent.name(), input_idx))
-
+
                 elif parent is None:
-                    for input_idx in list(range(node.get_operator().nb_inputs())):
-                        if input_idx not in [item[1] for item in parents_inputs]:
-                            parents_inputs.append((None, input_idx))
-
-            parents_inputs.sort(key=lambda x: x[1])
+                    if input_idx not in [item[1] for item in parents_inputs]:
+                        parents_inputs.append((None, input_idx))
+
+                input_idx += 1
             node_dict['parents'] = parents_inputs
             children_outputs = []
-            for child in node.get_children():
-                for input_idx in range(child.get_operator().nb_inputs()):
-                    for output_idx in range(node.get_operator().nb_outputs()):
-                        if child.get_operator().get_input(input_idx).dims() == node.get_operator().get_output(output_idx).dims():
-                            children_outputs.append((child.name(), output_idx))
+            output_idx = 0
+            for children in node.get_ordered_children():
+                for child in children:
+                    if child is not None:
+                        for parent in child.inputs():
+                            if parent[0] == node and parent[1] == output_idx:
+                                children_outputs.append((child.name(), output_idx))
+                output_idx += 1
             node_dict['children'] = children_outputs

             # Check if my node is a metaop
@@ -129,7 +132,7 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e

                 if params_file_format=='npz':
                     np.savez_compressed(Path(path_trainable_params, node.name()), **{node.name() : node.get_operator().get_output(0)})
-                    node_dict['tensor_data'] = Path(path_trainable_params, node.name() + '.npz')
+                    node_dict['tensor_data'] = str(Path(path_trainable_params, node.name() + '.npz'))

                 elif params_file_format=='json':
                     tensor = np.array(node.get_operator().get_output(0))
@@ -145,13 +148,13 @@ def _create_dict(ordered_nodes : list[aidge_core.Node], write_trainable_params_e

                     with open(Path(path_trainable_params, node.name() + '.json'), 'w') as fp:
                         json.dump(tensor_dict, fp, indent=4)

-                    node_dict['tensor_data'] = Path(path_trainable_params, node.name() + '.json')
+                    node_dict['tensor_data'] = str(Path(path_trainable_params, node.name() + '.json'))

                 else:
                     raise Exception("File format to write trainable parameters not recognized.")

-            elif write_trainable_params_embed:
+            if write_trainable_params_embed:
                 node_dict['tensor_data'] = np.array(node.get_operator().get_output(0)).tolist()

             else:
@@ -195,17 +198,21 @@ def gview_to_json(gview : aidge_core.GraphView, json_path : Path, write_trainabl
     :type params_file_format: str, optional
     """

-    if json_path.is_dir():
-        json_path = (json_path.parent).joinpath('model.json')
+    if not json_path.suffix:
+        if not json_path.is_dir():
+            json_path.mkdir(parents=True, exist_ok=True)
+        json_path = json_path.joinpath('model.json')

-    elif not json_path.is_dir():
-        if json_path.suffix == '.json':
-            pass
-        else:
-            raise Exception('If ``json_path`` contains a filename it must be of JSON format.')
+    else:
+        if json_path.suffix != '.json':
+            raise Exception('If ``json_path`` contains a filename, it must be of JSON format.')
+        if not json_path.parent.is_dir():
+            json_path.parent.mkdir(parents=True, exist_ok=True)

     if write_trainable_params_ext:
         path_trainable_params = (json_path.parent).joinpath(json_path.stem + '_trainable_params/')
+        path_trainable_params.mkdir(parents=True, exist_ok=True)
+
     else:
         path_trainable_params = Path()

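The `_create_dict` hunks above stop guessing edges from tensor dimensions (which can attach a parent to the wrong input whenever two tensors happen to share a shape) and instead read the input index straight from the graph connectivity. A minimal sketch of that matching rule, using hypothetical stand-in classes rather than the real aidge_core API (`FakeNode` and `parents_of` are illustrative only):

# Stand-in node: _inputs holds one (parent, parent_output_idx) pair per input,
# _outputs holds one list of (child, child_input_idx) pairs per output.
class FakeNode:
    def __init__(self, name, n_inputs=1, n_outputs=1):
        self._name = name
        self._inputs = [None] * n_inputs
        self._outputs = [[] for _ in range(n_outputs)]

    def name(self):
        return self._name

    def add_child(self, child, out_idx=0, in_idx=0):
        self._outputs[out_idx].append((child, in_idx))
        child._inputs[in_idx] = (self, out_idx)

    def get_parents(self):
        return [None if conn is None else conn[0] for conn in self._inputs]

    def outputs(self):
        return self._outputs


def parents_of(node):
    # Same rule as the patched loop: for each input, keep the parent whose
    # output connection points back at this node on exactly that input.
    parents_inputs = []
    for input_idx, parent in enumerate(node.get_parents()):
        if parent is None:
            parents_inputs.append((None, input_idx))
            continue
        for children in parent.outputs():
            for child, child_input_idx in children:
                if child is node and child_input_idx == input_idx:
                    parents_inputs.append((parent.name(), input_idx))
    return parents_inputs


a, c = FakeNode("A"), FakeNode("C")
b = FakeNode("B", n_inputs=2)
a.add_child(b, in_idx=0)
c.add_child(b, in_idx=1)
print(parents_of(b))  # [('A', 0), ('C', 1)], even if A and C produce identically shaped tensors
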
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index fd2a0b3f42d5888a68edb18caf046cea71dec0f3..9390fe5860b5d3523886856d9b2a40752d338af5 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -50,10 +50,10 @@ public:
     void zeros() override final;

     void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
+        AIDGE_ASSERT(offset + length <= mNbElts, "TensorImpl_cpu<{}>::copy(): copy offset ({}) + length ({}) is above capacity ({})", typeid(T).name(), offset, length, mNbElts);
         const T* srcT = static_cast<const T *>(src);
         T* dstT = static_cast<T *>(rawPtr(offset));
-        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "TensorImpl_cpu<{}>::copy(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
         AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "TensorImpl_cpu<{}>::copy(): overlapping copy is not supported", typeid(T).name());
         std::copy(srcT, srcT + length, dstT);
     }
@@ -72,7 +72,7 @@ public:

     void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
         const T* src = static_cast<const T*>(rawPtr(offset));
-        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "TensorImpl_cpu<{}>::copyToHost(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
+        AIDGE_ASSERT(offset + length <= mData.size(), "TensorImpl_cpu<{}>::copyToHost(): copy offset ({}) + length ({}) is above capacity ({})", typeid(T).name(), offset, length, mData.size());
         std::copy(src, src + length, static_cast<T *>(dst));
     }

diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index 35f6327444a874d8f5c2e94da6520244e095263a..69a28960b57e6ba2ac8a699bf45ff09961fa4135 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -176,6 +176,11 @@ void init_Node(py::module& m) {
     Get children.
     )mydelimiter")

+    .def("get_ordered_children", &Node::getOrderedChildren,
+    R"mydelimiter(
+    Get ordered children.
+    )mydelimiter")
+
     .def("__call__", [](Node &self, pybind11::args args) {
         std::vector<Connector> connectors;

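The `get_ordered_children` binding is what lets the export above attach an output index to each child: it returns one list of children per output (possibly containing None for unconnected slots, which is why the patched loop checks `if child is not None`), whereas the flat `get_children()` view loses that index. A small illustration with a hypothetical stand-in class (`StubNode` is not part of aidge_core):

# Stand-in keeping one ordered list of children per output slot.
class StubNode:
    def __init__(self, name, n_outputs=1):
        self._name = name
        self._ordered_children = [[] for _ in range(n_outputs)]

    def name(self):
        return self._name

    def add_child(self, child, out_idx=0):
        self._ordered_children[out_idx].append(child)

    def get_children(self):
        # Flat view: which output each child hangs off is lost.
        return {c for per_output in self._ordered_children for c in per_output}

    def get_ordered_children(self):
        # One list per output, in connection order.
        return self._ordered_children


split = StubNode("Split", n_outputs=2)
relu, add = StubNode("ReLU"), StubNode("Add")
split.add_child(relu, out_idx=0)
split.add_child(add, out_idx=1)

print(sorted(c.name() for c in split.get_children()))
# ['Add', 'ReLU'] (no way to tell which output each one is wired to)
print([[c.name() for c in outs] for outs in split.get_ordered_children()])
# [['ReLU'], ['Add']] (enough to record ('ReLU', 0) and ('Add', 1))
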
diff --git a/src/backend/cpu/data/TensorImpl.cpp b/src/backend/cpu/data/TensorImpl.cpp
index ed3c96f80c1b8bafd70425451d6618428d1888f0..506287a0c520915e6426f1f0b64d9c562c754d33 100644
--- a/src/backend/cpu/data/TensorImpl.cpp
+++ b/src/backend/cpu/data/TensorImpl.cpp
@@ -47,8 +47,8 @@ void Aidge::TensorImpl_cpu<T>::copyCast(const void *src, const Aidge::DataType s
         return;
     }

+    AIDGE_ASSERT(offset + length <= mNbElts, "TensorImpl_cpu<{}>::copyCast(): copy offset ({}) + length ({}) is above capacity ({})", typeid(T).name(), offset, length, mNbElts);
     T* dstT = static_cast<T *>(rawPtr(offset));
-    AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "TensorImpl_cpu<{}>::copyCast(): copy length ({}) is above capacity ({})", typeid(T).name(), length, mNbElts);
     switch (srcDt)
     {
         case DataType::Float64:
diff --git a/unit_tests/backend/Test_TensorImpl.cpp b/unit_tests/backend/Test_TensorImpl.cpp
index 43e25092a0f502698bbff7b0142969154f2cb0b0..ceb6772d01d4ee84524896fead96abcb445f84ff 100644
--- a/unit_tests/backend/Test_TensorImpl.cpp
+++ b/unit_tests/backend/Test_TensorImpl.cpp
@@ -47,6 +47,7 @@ TEST_CASE("Tensor fill", "[TensorImpl][fill]") {
   concatenatedTensor->getImpl()->copy(myTensor1->getImpl()->rawPtr(), 5, 0);
   concatenatedTensor->getImpl()->copy(myTensor2->getImpl()->rawPtr(), 5, 5);
   concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 10);
+  REQUIRE_THROWS(concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 11));
   // concatenatedTensor->print();
   std::shared_ptr<Tensor> expectedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{
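The new `REQUIRE_THROWS` case pins down the contract introduced by the assertions: `offset + length` must not exceed the destination capacity (11 + 5 > 15 for the 3x5 tensor in the test). A pure-Python mirror of that precondition, assuming a plain list as the destination buffer (`checked_copy` is a hypothetical helper, not aidge API):

def checked_copy(dst, src, length, offset=0):
    # Reject out-of-bounds writes up front, like the AIDGE_ASSERT added above.
    if offset + length > len(dst):
        raise IndexError(
            f"copy offset ({offset}) + length ({length}) is above capacity ({len(dst)})")
    dst[offset:offset + length] = src[:length]


buf = [0] * 15
checked_copy(buf, [1, 2, 3, 4, 5], 5, offset=10)      # fine: fills elements 10..14
try:
    checked_copy(buf, [1, 2, 3, 4, 5], 5, offset=11)  # the case the test expects to throw
except IndexError as exc:
    print(exc)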