diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 565bcc358250312150281dba6ab26de97a7e0cac..c6f760c1c999b1b1031fdfd2a0bd5164ff80c3bd 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -136,8 +136,11 @@ public:
         return mTensor;
     };
 
-    void
-    getCoord(std::size_t const flatIdx, std::vector<Coord_t> &coordIdx) const noexcept
+    /// @brief Get the logical coordinates of the data at the given linear index
+    /// @param flatIdx linear index of the desired data
+    /// @param coordIdx output parameter receiving the logical coordinates
+    /// @note The index is expressed in number of elements
+    void getCoord(NbElts_t const flatIdx, std::vector<Coord_t> &coordIdx) const noexcept
     {
         assert(
             (coordIdx.size() == mvDimensions.size())
@@ -153,12 +156,19 @@ public:
         /// @todo implement it with synchronized reverse iterators
         for (std::size_t i = mvDimensions.size() - 1; (i--) > 0;)
         {
+            // computes coordinates in 0-based coordinates
             coordIdx[i] = In % mvDimensions[i];
             In = (In - coordIdx[i]) / mvDimensions[i];
+            // shift with respect to first data coordinates
+            coordIdx[i] = coordIdx[i] + mvFirstDataCoordinates[i];
         }
     };
 
-    std::size_t getIdx(std::vector<Coord_t> const &coordIdx) const noexcept
+    /// @brief Get the linear index of the first Byte_t of the data at given coordinates
+    /// @param coordIdx coordinates of the desired data
+    /// @note The index is expressed in number of elements
+    /// @return Linear index of the first Byte_t of the data at given coordinates
+    NbElts_t getIdx(std::vector<Coord_t> const &coordIdx) const noexcept
     {
         std::size_t flatIdx(0);
         for (std::size_t i = 0; i < mvDimensions.size(); ++i)
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 07684241ee9a8336692fc0da911a56b31457903a..4edd385673b4743683c944a06cae5e59cbfaa94c 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -63,6 +63,7 @@ using ImplPtr_t = std::unique_ptr<TensorImpl, SImplDeleter>;
  * related coordinates, around an active data for which it is valid to read a value.<br>
  * Contexts are useful for operator whose output for a data depends on its neighbors.
  */
+/// @todo constness consistency: what is mutable within a const Tensor?
 class Tensor : public Data,
                public Registrable<
                    Tensor,
@@ -474,13 +475,20 @@ public:
 
     std::shared_ptr<Tensor> grad();
 
-private:
-    /// @brief Get the linear index of the first Byte_t of the data at given coordinates
+    /// @brief Get the logical coordinates of the data at the given linear index
+    /// @param flatIdx linear index of the desired data
+    /// @param coordIdx output parameter receiving the logical coordinates
+    /// @note The index is expressed in number of elements
+    void getCoord(NbElts_t const flatIdx, std::vector<Coord_t> &coordIdx) const noexcept;
+
+    /// @brief Get the linear index of the first Byte_t of the data at given
+    /// coordinates
     /// @param coordIdx coordinates of the desired data
     /// @note The index is expressed in number of elements
     /// @return Linear index of the first Byte_t of the data at given coordinates
-    std::size_t getIdx(std::vector<Coord_t> const &coordIdx) const noexcept;
+    NbElts_t getIdx(std::vector<Coord_t> const &coordIdx) const noexcept;
 
+private:
     /// @brief Getting the address of the very first data in memory (lexicographic
     /// order), read only access to data
     /// @details Points to a contiguous in-memory array such that, when cast to
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index c453d2481137a0d340cc4b44c79061511b750a5e..73fab8c0f488e20853d5964cda31d795b4c209c1 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -21,7 +21,6 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Operator.hpp"
-#include "aidge/utils/Parameter.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index bacf20c21032d30b5560217d615d7c08477c6c18..1097149270b9aeadd0c836cffa1ead3048baa446 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -70,7 +70,7 @@ public:
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
         mImpl = op.mImpl ?
-                    Registrar<MatMul_Op>::create(mOutput->getImpl()->backend())(*this) :
+                    Registrar<MatMul_Op>::create(mOutput->getImpl().backend())(*this) :
                     nullptr;
     }
 
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 44f8d95cdb39b8c4778136d87510a6358148bebc..9cc8c33b5dbfbeb5c93e5bb46da8d573d49a6f0a 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -34,31 +34,32 @@ void addCtor(py::class_<
                  detail::pimpl::ImplPtr_t(const Tensor&)>>& mTensor)
 {
     mTensor
-        .def(py::init([](py::array_t<T, py::array::c_style | py::array::forcecast> b) {
-            /* Request a buffer descriptor from Python */
-            py::buffer_info info = b.request();
-            Tensor* newTensor = new Tensor();
-            newTensor->setDatatype(NativeType<T>::type);
-            const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end());
-            newTensor->resize(dims);
-            // TODO : Find a better way to choose backend
-            std::set<std::string> availableBackends = Tensor::getAvailableBackends();
-            if (availableBackends.find("cpu") != availableBackends.end())
+        .def(py::init(
+            [](py::array_t<T, py::array::c_style | py::array::forcecast> b)
             {
-                newTensor->setBackend("cpu");
-                newTensor->getImpl().setRawPtr(reinterpret_cast<Byte_t*>(info.ptr));
-            }
-            else
-            {
-                printf("Warning : Could not use aidge_cpu backend, verify you have "
-                       "`import aidge_cpu`\n");
-            }
+                /* Request a buffer descriptor from Python */
+                py::buffer_info info = b.request();
+                Tensor* newTensor = new Tensor();
+                newTensor->setDatatype(NativeType<T>::type);
+                const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end());
+                newTensor->resize(dims);
+                // TODO : Find a better way to choose backend
+                std::set<std::string> availableBackends = Tensor::getAvailableBackends();
+                if (availableBackends.find("cpu") != availableBackends.end())
+                {
+                    newTensor->setBackend("cpu");
+                    newTensor->getImpl().setRawPtr(reinterpret_cast<Byte_t*>(info.ptr));
+                }
+                else
+                {
+                    printf("Warning : Could not use aidge_cpu backend, verify you have "
+                           "`import aidge_cpu`\n");
+                }
 
-            return newTensor;
-        }))
-        .def("__setitem__", (void (Tensor::*)(std::size_t, T)) & Tensor::set)
-        .def(
-            "__setitem__", (void (Tensor::*)(std::vector<std::size_t>, T)) & Tensor::set);
+                return newTensor;
+            }))
+        .def("__setitem__", (void(Tensor::*)(NbElts_t, T)) & Tensor::set)
+        .def("__setitem__", (void(Tensor::*)(std::vector<Coord_t>, T)) & Tensor::set);
 }
 
 void init_Tensor(py::module& m)
@@ -88,7 +89,7 @@ void init_Tensor(py::module& m)
         .def("dims", (const std::vector<DimSize_t>& (Tensor::*)() const) & Tensor::dims)
         .def("dtype", &Tensor::dataType)
         .def("size", &Tensor::size)
-        .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&)) & Tensor::resize)
+        .def("resize", (void(Tensor::*)(const std::vector<DimSize_t>&)) & Tensor::resize)
         .def("has_impl", &Tensor::hasImpl)
         .def("get_coord", &Tensor::getCoord)
         .def("get_idx", &Tensor::getIdx)
@@ -97,7 +98,8 @@ void init_Tensor(py::module& m)
         .def("__len__", [](Tensor& b) -> size_t { return b.size(); })
         .def(
             "__getitem__",
-            [](Tensor& b, size_t idx) -> py::object {
+            [](Tensor& b, size_t idx) -> py::object
+            {
                 if (idx >= b.size())
                     throw py::index_error();
                 switch (b.dataType())
@@ -114,7 +116,8 @@ void init_Tensor(py::module& m)
             })
         .def(
             "__getitem__",
-            [](Tensor& b, std::vector<size_t> coordIdx) -> py::object {
+            [](Tensor& b, std::vector<Coord_t> coordIdx) -> py::object
+            {
                 if (b.getIdx(coordIdx) >= b.size())
                     throw py::index_error();
                 switch (b.dataType())
@@ -129,47 +132,52 @@ void init_Tensor(py::module& m)
                         return py::none();
                 }
             })
-        .def_buffer([](Tensor& b) -> py::buffer_info {
-            const std::unique_ptr<TensorImpl>& tensorImpl = b.getImpl();
+        .def_buffer(
+            [](Tensor& b) -> py::buffer_info
+            {
+                /// @todo const TensorImpl, yet a pointer to writable data is requested
+                /// later: inconsistent?
+                const TensorImpl& tensorImpl = b.getImpl();
 
-            std::vector<size_t> dims;
-            std::vector<size_t> strides;
-            size_t stride = tensorImpl.scalarSize();
+                std::vector<size_t> dims;
+                std::vector<size_t> strides;
+                size_t stride = tensorImpl.scalarSize();
 
-            for (unsigned int dim = b.nbDims(); dim > 0; dim--)
-            {
-                dims.push_back(b.dims()[dim - 1]);
-                strides.push_back(stride);
-                stride *= b.dims()[dim - 1];
-            }
-            std::reverse(dims.begin(), dims.end());
-            std::reverse(strides.begin(), strides.end());
+                for (unsigned int dim = b.nbDims(); dim > 0; dim--)
+                {
+                    dims.push_back(b.dims()[dim - 1]);
+                    strides.push_back(stride);
+                    stride *= b.dims()[dim - 1];
+                }
+                std::reverse(dims.begin(), dims.end());
+                std::reverse(strides.begin(), strides.end());
 
-            std::string dataFormatDescriptor;
-            switch (b.dataType())
-            {
-                case DataType::Float64:
-                    dataFormatDescriptor = py::format_descriptor<double>::format();
-                    break;
-                case DataType::Float32:
-                    dataFormatDescriptor = py::format_descriptor<float>::format();
-                    break;
-                case DataType::Int32:
-                    dataFormatDescriptor = py::format_descriptor<int>::format();
-                    break;
-                default:
-                    throw py::value_error("Unsupported data format");
-            }
+                std::string dataFormatDescriptor;
+                switch (b.dataType())
+                {
+                    case DataType::Float64:
+                        dataFormatDescriptor = py::format_descriptor<double>::format();
+                        break;
+                    case DataType::Float32:
+                        dataFormatDescriptor = py::format_descriptor<float>::format();
+                        break;
+                    case DataType::Int32:
+                        dataFormatDescriptor = py::format_descriptor<int>::format();
+                        break;
+                    default:
+                        throw py::value_error("Unsupported data format");
+                }
 
-            return py::buffer_info(
-                tensorImpl.rawPtr(), /* Pointer to buffer */
-                tensorImpl.scalarSize(), /* Size of one scalar */
-                dataFormatDescriptor, /* Python struct-style format descriptor */
-                b.nbDims(), /* Number of dimensions */
-                dims, /* Buffer dimensions */
-                strides /* Strides (in bytes) for each index */
-            );
-        });
+                return py::buffer_info(
+                    const_cast<void*>(reinterpret_cast<void const*>(
+                        tensorImpl.GetDataAddress())), /* Pointer to buffer */
+                    tensorImpl.scalarSize(), /* Size of one scalar */
+                    dataFormatDescriptor, /* Python struct-style format descriptor */
+                    b.nbDims(), /* Number of dimensions */
+                    dims, /* Buffer dimensions */
+                    strides /* Strides (in bytes) for each index */
+                );
+            });
 
     // TODO : If the ctor with the right data type does not exist, pybind will always
     // convert the data to INT ! Need to find a way to avoid this !
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 13fa49279e4490a9d68ddffc47d431bfe933d2ec..6f5806a748c0b9c850eab2090970c7b7833dd3ed 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#include "aidge/backend/TensorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
+#include "aidge/backend/TensorImpl.hpp"
 
 namespace Aidge
 {
@@ -67,10 +67,16 @@ void Tensor::CopyData(Byte_t const *const srcPtr, std::size_t const Bytes)
     mImpl->copy(srcPtr, Bytes);
 }
 
-std::size_t Tensor::getIdx(std::vector<Coord_t> const &coordIdx) const noexcept
+void Tensor::getCoord(
+    NbElts_t const flatIdx, std::vector<Coord_t> &coordIdx) const noexcept
+{
+    mImpl->getCoord(flatIdx, coordIdx);
+}
+NbElts_t Tensor::getIdx(std::vector<Coord_t> const &coordIdx) const noexcept
 {
     return mImpl->getIdx(coordIdx);
 }
+
 /// @brief Getting the address of the very first data in memory (lexicographic
 /// order), read only access to data
 Byte_t const *Tensor::GetDataAddress() const noexcept