diff --git a/aidge_core/unit_tests/test_recipies.py b/aidge_core/unit_tests/test_recipies.py
index 7bdb1f48b7498becb318d6b3eccd850f7f375623..11d72905cd7a935e6934d8ecc0de666fad684e69 100644
--- a/aidge_core/unit_tests/test_recipies.py
+++ b/aidge_core/unit_tests/test_recipies.py
@@ -11,9 +11,8 @@ SPDX-License-Identifier: EPL-2.0
 import unittest
 import aidge_core
 
-class test_parameters(unittest.TestCase):
-    """Very basic test to make sure the python APi is not broken.
-    Can be remove in later stage of the developpement.
+class test_recipies(unittest.TestCase):
+    """
     """
     def setUp(self):
         pass
diff --git a/aidge_core/unit_tests/test_tensor.py b/aidge_core/unit_tests/test_tensor.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b6887a3a9093b6db5000ab0ab3666258c4307c4
--- /dev/null
+++ b/aidge_core/unit_tests/test_tensor.py
@@ -0,0 +1,44 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+
+from functools import reduce
+import numpy as np
+
+class test_tensor(unittest.TestCase):
+    """Very basic test of the Tensor Python bindings (get_coord / get_idx).
+    """
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_getcoord_getidx(self):
+        dims = [2,2,2]
+        size = reduce((lambda x, y: x*y), dims)
+
+        np_array = np.arange(size).reshape(dims)
+
+        t = aidge_core.Tensor(np_array)
+        for i in range(size):
+            coord = t.get_coord(i)
+            idx = t.get_idx(coord)
+            self.assertEqual(idx, i)
+
+if __name__ == '__main__':
+    unittest.main()
+
+
+
+
+
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index c3a6e478f8943253a9f9b3565db2d4452a9ca133..468f48feaecc62ee10cd980ce42f18c99d9bc549 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -559,6 +559,40 @@ class Tensor : public Data,
         return mGrad;
     }
 
+    /**
+     * @brief From the 1D index, return the coordinate of an element in the tensor.
+     *
+     * @param flatIdx 1D index of the value considering a flattened tensor.
+     * @return std::vector<DimSize_t>
+     */
+    std::vector<DimSize_t> getCoord(DimSize_t flatIdx){
+        std::vector<DimSize_t> coordIdx = {};
+        DimSize_t idx = flatIdx;
+        for (DimSize_t d: mDims){
+            coordIdx.push_back(idx % d);
+            idx/=d;
+        }
+        return coordIdx;
+    }
+
+    /**
+     * @brief From the coordinate, return the 1D index of an element in the tensor.
+     *
+     * @param coordIdx Coordinate of an element in the tensor
+     * @return DimSize_t
+     */
+    DimSize_t getIdx(std::vector<DimSize_t> coordIdx){
+        DimSize_t flatIdx = 0;
+        DimSize_t stride = 1;
+        assert(coordIdx.size() == mDims.size() && "Coordinates do not match the number of dimensions");
+        for(std::size_t i=0; i< mDims.size(); ++i){
+            assert(coordIdx[i] < mDims[i] && "Coordinates do not fit the dimensions of the tensor");
+            flatIdx += (coordIdx[i] * stride);
+            stride *= mDims[i];
+        }
+        return flatIdx;
+    }
+
 private:
     ///\bug not protected against overflow
     std::size_t computeSize() {
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index d6442723ecc79527e8eaa7d3e03a466c085dfa58..168c2c946efa297bbc876095fc4274a3df67b21c 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -26,10 +26,10 @@ namespace Aidge {
 
 template<typename T>
 void addCtor(py::class_<Tensor,
-             std::shared_ptr<Tensor>, 
-             Data, 
+             std::shared_ptr<Tensor>,
+             Data,
              Registrable<Tensor,
-                         std::tuple<std::string, DataType>, 
+                         std::tuple<std::string, DataType>,
                          std::unique_ptr<TensorImpl>(const Tensor&)>>& mTensor){
     mTensor.def(py::init([]( py::array_t<T, py::array::c_style | py::array::forcecast> b) {
         /* Request a buffer descriptor from Python */
@@ -46,7 +46,7 @@ void addCtor(py::class_<Tensor,
         }else{
             printf("Warning : Could not use aidge_cpu backend, verify you have `import aidge_cpu`\n");
         }
-        
+
         return newTensor;
     }));
 }
@@ -54,16 +54,16 @@ void addCtor(py::class_<Tensor,
 
 void init_Tensor(py::module& m){
     py::class_<Registrable<Tensor,
-                           std::tuple<std::string, DataType>, 
+                           std::tuple<std::string, DataType>,
                            std::unique_ptr<TensorImpl>(const Tensor&)>,
                std::shared_ptr<Registrable<Tensor,
-                                           std::tuple<std::string, DataType>, 
+                                           std::tuple<std::string, DataType>,
                                            std::unique_ptr<TensorImpl>(const Tensor&)>>>(m,"TensorRegistrable");
 
-    py::class_<Tensor, std::shared_ptr<Tensor>, 
-               Data, 
+    py::class_<Tensor, std::shared_ptr<Tensor>,
+               Data,
                Registrable<Tensor,
-                           std::tuple<std::string, DataType>, 
+                           std::tuple<std::string, DataType>,
                            std::unique_ptr<TensorImpl>(const Tensor&)>> pyClassTensor
         (m,"Tensor", py::multiple_inheritance(), py::buffer_protocol());
 
@@ -74,6 +74,8 @@ void init_Tensor(py::module& m){
         .def("size", &Tensor::size)
         .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&)) &Tensor::resize)
        .def("has_impl", &Tensor::hasImpl)
+        .def("get_coord", &Tensor::getCoord)
+        .def("get_idx", &Tensor::getIdx)
         .def_static("get_available_backends", &Tensor::getAvailableBackends)
         .def("__str__", [](Tensor& b) {
             return b.toString();
@@ -142,6 +144,6 @@
 // #if SIZE_MAX != 0xFFFFFFFF
     addCtor<double>(pyClassTensor);
 // #endif
-    
+
 }
 }
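
For reference, a minimal usage sketch of the new bindings (the shape and flat index below are arbitrary examples; this assumes `aidge_core` has been rebuilt with this patch). It exercises the same round-trip property checked by the unit test above: `get_idx` is the inverse of `get_coord`. Note that, as implemented here, dimension 0 is treated as the fastest-varying one (`coord[0] = flatIdx % dims[0]`).

    import aidge_core
    import numpy as np

    # Build a small 2x2x2 tensor from a NumPy array, as in the unit test above.
    t = aidge_core.Tensor(np.arange(8).reshape(2, 2, 2))

    coord = t.get_coord(5)        # -> [1, 0, 1] with the convention above (dim 0 fastest)
    assert t.get_idx(coord) == 5  # round-trips back to the original flat index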