diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 509c11691047604fbce959cfb29649aac75b5a1e..538a6bb27c7af8b380dc1eaba6845bbf1ab42dbf 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -171,21 +171,29 @@ public:
     };
 
     /**
-     * Set the size, in number of elements, that must be stored.
+     * @brief Set the size, in number of elements, that must be stored.
     */
     virtual void resize(std::vector<DimSize_t> dims) {
         mNbElts = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
     }
 
     /**
-     * Return the number of elements stored.
+     * @brief Return the number of elements stored.
     */
     inline std::size_t size() const noexcept { return mNbElts; }
 
     /**
-     * Return the size (in bytes) of one element (scalar).
+     * @brief Return the size (in bytes) of one element (scalar).
     */
     virtual std::size_t scalarSize() const noexcept = 0;
+
+    /**
+     * @brief Set every element of the implementation to zero.
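+     *
+     * The default implementation only raises an error; concrete backends
+     * (e.g. TensorImpl_cpu) are expected to override it.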
+     */
+    virtual void zeros() {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Function not implemented");
+    }
+
     constexpr const char *backend() const { return mBackend; }
 
     /**
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 549232b2635f48b979208bb2f91b845dacef6f8b..69ebb7bb916a2a6df1267339666b3277a8b5cbf1 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -53,6 +53,15 @@ public:
 
     inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
 
+    void zeros() override final {
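+        // make sure the backing storage is allocated before writing to it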
+        if (mData.empty()) {
+            lazyInit();
+        }
+        for (std::size_t i = 0; i < mData.size(); ++i) {
+            *(mData.data() + i) = T(0);
+        }
+    }
+
     void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
         const T* srcT = static_cast<const T *>(src);
         T* dstT = static_cast<T *>(rawPtr(offset));
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 95101bb3ad1704f4acb8dd3e46ef7ee450f1f91f..b82ec89d0096d47644e1bb4bd3819536ce7ccd66 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -12,10 +12,12 @@
 #ifndef AIDGE_CORE_DATA_TENSOR_H_
 #define AIDGE_CORE_DATA_TENSOR_H_
 
+#include <cstddef>      // std::size_t
 #include <cstring>
+#include <functional>   // std::multiplies
 #include <set>
 #include <memory>
-#include <numeric>   // std::accumulate
+#include <numeric>      // std::accumulate
 #include <string>
 #include <type_traits>  // std::is_arithmetic
 #include <vector>
@@ -35,15 +37,17 @@ namespace Aidge {
 class Tensor : public Data,
                public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> {
    private:
-    DataType mDataType; /** enum to specify data type. */
+    DataType mDataType = DataType::Float32; /** enum to specify data type. */
     std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */
     std::vector<DimSize_t> mStrides; /** Stride dimensions of the tensor. */
-    std::shared_ptr<TensorImpl> mImpl; /** Pointer to the actual data implementation. */
+    std::shared_ptr<TensorImpl> mImpl = nullptr; /** Pointer to the actual data implementation. */
     std::size_t mImplOffset = 0;
-    std::shared_ptr<Tensor> mGrad; /** Pointer to the associated gradient Tensor instance. */
+    std::shared_ptr<Tensor> mGrad = nullptr; /** Pointer to the associated gradient Tensor instance. */
 
     // Cached data
-    std::size_t mSize = 0;    /** Number of elements in the Tensor. */
+    /// @brief Number of elements in the Tensor.
+    std::size_t mSize;
+    /// @brief Whether or not data are contiguous in memory.
     bool mContiguous = true;
 
    public:
@@ -51,64 +55,48 @@ class Tensor : public Data,
 
     /**
      * @brief Construct a new empty Tensor object.
-     * @param dataType Sets the type of inserted data.
+     * The resulting Tensor has the features of an undefined scalar: 0 dimensions,
+     * size 1 and no allocated data.
      */
-    Tensor(DataType dataType = DataType::Float32)
+    Tensor(DataType dtype = DataType::Float32)
         : Data(Type),
-          mDataType(dataType)
+          mDataType(dtype),
+          mDims(std::vector<DimSize_t>({})),
+          mStrides({1}),
+          mSize(1)
     {
         // ctor
     }
 
     /**
-     * @brief Construct a new Tensor object from dimensions.
+     * @brief Construct a new Tensor object from an arithmetic parameter.
      *
-     * @param dims dimensions of the tensor
-     * @param dataType datatype of the tensor (default = DataType::Float32)
+     * @tparam T Type of the input parameter.
+     * @tparam VT Decayed type of the input parameter.
+     * @param val Input value.
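+     *
+     * For instance, ``Tensor t(42);`` builds a 0-dimension (scalar) Tensor of
+     * size 1 with DataType::Int32, backed by the "cpu" implementation.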
      */
-    Tensor(const std::vector<DimSize_t>& dims, DataType dataType = DataType::Float32)
+    template<typename T,
+             typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    Tensor(T val)
         : Data(Type),
-          mDataType(dataType),
-          mDims(dims)
+          mDataType(NativeType<VT>::type),
+          mDims({}),
+          mStrides({1}),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
+          mSize(1)
     {
-        computeSize();
+        *static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
     }
 
     /**
-     * @brief Construct a new Tensor object from another one (shallow copy).
-     * Data memory is not copied, but shared between the new Tensor and the
-     * initial one.
+     * @brief Construct a new Tensor object from dimensions.
      *
-     * @param otherTensor
+     * @param dims dimensions of the tensor
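+     *
+     * Strides and size are computed from @p dims, but no data is allocated;
+     * call setBackend() afterwards to attach an implementation.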
      */
-    Tensor(const Tensor&)            = default;
-    Tensor(Tensor&&)            = default;
-
-    /**
-     * Perform a deep copy of the tensor.
-    */
-    Tensor clone() const {
-        Tensor newTensor(*this);
-        if (!newTensor.isContiguous()) {
-            newTensor.makeContiguous();
-        }
-        else {
-            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
-            newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
-            newTensor.setImpl(newImpl);
-        }
-        return newTensor;
-    }
-
-    template<typename T,
-            typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
-    Tensor(T val)
-        : Data(Type),
-          mDataType(NativeType<VT>::type),
-          mDims({}), mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
-          mSize(1) {
-        *static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
+    Tensor(const std::vector<DimSize_t>& dims)
+        : Data(Type)
+    {
+        // set mDims, mStrides, mContiguous, mSize
+        resize(dims);
     }
 
     /**
@@ -123,20 +111,11 @@ class Tensor : public Data,
           mDims({SIZE_0}),
           mStrides({1}),
           mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
-          mSize(SIZE_0) {
+          mSize(SIZE_0)
+    {
         mImpl->copyFromHost(&arr.data[0], SIZE_0);
     }
 
-    template <typename T, std::size_t SIZE_0>
-    constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
-        resize({SIZE_0});
-        if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0});
-        }
-        mImpl->copyFromHost(&arr.data[0], SIZE_0, mImplOffset);
-        return *this;
-    }
-
     /**
      * @brief Construct a new Tensor object from the 2-dimensions Array helper.
      * @tparam T datatype
@@ -154,16 +133,6 @@ class Tensor : public Data,
         mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
     }
 
-    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
-    constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
-        resize({SIZE_0, SIZE_1});
-        if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1});
-        }
-        mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1, mImplOffset);
-        return *this;
-    }
-
     /**
      * @brief Construct a new Tensor object from the 3-dimensions Array helper.
      * @tparam T datatype
@@ -182,16 +151,6 @@ class Tensor : public Data,
         mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
     }
 
-    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
-    constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
-        resize({SIZE_0, SIZE_1, SIZE_2});
-        if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2});
-        }
-        mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2, mImplOffset);
-        return *this;
-    }
-
     /**
      * @brief Construct a new Tensor object from the 4-dimensions Array helper.
      * @tparam T datatype
@@ -211,15 +170,19 @@ class Tensor : public Data,
         mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
     }
 
-    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
-    constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
-        resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
-        if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3});
-        }
-        mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3, mImplOffset);
-        return *this;
-    }
+    /**
+     * @brief Copy constructor. Construct a new Tensor object from another one
+     * (shallow copy). Data memory is not copied, but shared between the new
+     * Tensor and the initial one.
+     * @param other Tensor to copy; its data is shared, not duplicated.
+     */
+    Tensor(const Tensor& other) = default;
+
+    /**
+     * @brief Move constructor.
+     * @param other Tensor to move from.
+     */
+    Tensor(Tensor&& other) = default;
 
     /**
      * @brief Copy dimensions, datatype and data from another Tensor.
@@ -227,24 +190,32 @@ class Tensor : public Data,
      * existing implementation. Tensor backend/device remain untouched.
      * If current Tensor does not have an implementation, only a shallow copy
      * is performed and the Tensor will share data with t.
-     * @param t other Tensor object.
+     * @param other other Tensor object.
      * @return Tensor&
      */
-    Tensor &operator=(const Tensor &t) {
-        resize(t.dims(), t.strides());
-        setDataType(t.dataType(), false); // do not convert existing data
-        if (t.hasImpl()) {
-            if (hasImpl()) {
-                copyFrom(t);
-            }
-            else {
-                // Perform a shallow copy only
-                setImpl(t.mImpl, t.mImplOffset);
-            }
-        }
-        else {
-            setImpl(nullptr);
-        }
+    Tensor &operator=(const Tensor& other);
+
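+    // Each ArrayND assignment operator below builds a temporary Tensor from the
+    // helper array and forwards it to operator=(const Tensor&) above.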
+    template <typename T, std::size_t SIZE_0>
+    constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
+        *this = Tensor(std::move(arr));
+        return *this;
+    }
+
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
+    constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
+        *this = Tensor(std::move(arr));
+        return *this;
+    }
+
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
+    constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
+        *this = Tensor(std::move(arr));
+        return *this;
+    }
+
+    template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
+    constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
+        *this = Tensor(std::move(arr));
         return *this;
     }
 
@@ -260,6 +231,23 @@ class Tensor : public Data,
         return *mImpl == *(otherTensor.mImpl);
     }
 
+public:
+    /**
+     * @brief Perform a deep copy of the tensor.
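+     *
+     * For a Tensor with an implementation, ``Tensor copy = t.clone();`` gives
+     * ``copy`` its own contiguous storage holding the same values, so
+     * ``copy.getImpl() != t.getImpl()``.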
+    */
+    Tensor clone() const {
+        Tensor newTensor(*this);
+        if (!newTensor.isContiguous()) {
+            newTensor.makeContiguous();
+        }
+        else {
+            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
+            newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
+            newTensor.setImpl(newImpl);
+        }
+        return newTensor;
+    }
+
     /**
      * @brief Set the backend of the Tensor associated implementation. If there
      * was no previous implementation set, data will be allocated, but it will
@@ -292,12 +280,7 @@ class Tensor : public Data,
      * @brief Get a list of available backends.
      * @return std::set<std::string>
      */
-    static std::set<std::string> getAvailableBackends(){
-        std::set<std::string> backendsList;
-        for(std::tuple<std::string, DataType> tupleKey : Registrar<Tensor>::getKeys())
-            backendsList.insert(std::get<0>(tupleKey));
-        return backendsList;
-    }
+    static std::set<std::string> getAvailableBackends();
 
     /**
      * @brief Get the data type enum.
@@ -369,13 +352,13 @@ class Tensor : public Data,
      * @brief Get dimensions of the Tensor object.
      * @return constexpr const std::vector<DimSize_t>&
      */
-    constexpr const std::vector<DimSize_t> &dims() const { return mDims; }
+    constexpr inline const std::vector<DimSize_t>& dims() const noexcept { return mDims; }
 
     /**
      * @brief Get strides of the Tensor object.
      * @return constexpr const std::vector<DimSize_t>&
      */
-    constexpr const std::vector<DimSize_t> &strides() const { return mStrides; }
+    constexpr inline const std::vector<DimSize_t>& strides() const noexcept { return mStrides; }
 
     /**
      * @brief Return true if Tensor is contiguous in memory.
@@ -424,6 +407,18 @@ class Tensor : public Data,
      * @return false
      */
     bool empty() const { return mDims.empty(); }
+    // bool newempty() const noexcept {
+    //     return mSize == 0;
+    // }
+
+    /**
+     * @brief Set each element of the tensor to zero.
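+     *
+     * Minimal usage sketch (assumes a backend such as "cpu" has been set so
+     * that an implementation exists; the call is a no-op otherwise):
+     * @code
+     * Tensor t(std::vector<DimSize_t>{2, 3});
+     * t.setBackend("cpu");
+     * t.zeros();   // every element of t now reads 0
+     * @endcode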
+     */
+    void zeros() const {
+        if (mImpl) {
+            mImpl->zeros();
+        }
+    }
 
     template <typename expectedType>
     const expectedType& get(std::size_t idx) const {
@@ -455,12 +450,13 @@ class Tensor : public Data,
     inline void print() const { fmt::print("{}\n", toString()); }
 
     std::shared_ptr<Tensor> grad() {
-        if (!mGrad) {
-            mGrad = std::make_shared<Tensor>(mDataType);
-            mGrad->resize(mDims);
+        // if (!mGrad && mImpl) {
+        //     mGrad = std::make_shared<Tensor>(mDims);
+        //     mGrad->setDataType(mDataType);
+        //     mGrad->setBackend(mImpl->backend());
 
-            if (mImpl) mGrad->setBackend(mImpl->backend());
-        }
+        //     // if (mImpl) mGrad->setBackend(mImpl->backend());
+        // }
 
         return mGrad;
     }
@@ -473,13 +469,13 @@ class Tensor : public Data,
      * @return std::vector<DimSize_t>
      */
     std::vector<std::size_t> getCoord(std::size_t flatIdx) const {
-        std::vector<std::size_t> coordIdx = std::vector<std::size_t>(mDims.size());
-        std::size_t idx = flatIdx;
-        for (std::size_t i = mDims.size() - 1; i > 0; --i){
-            coordIdx[i] = (idx % mDims[i]);
-            idx/=mDims[i];
+        std::vector<std::size_t> coordIdx(mDims.size());
+        std::size_t i = mDims.size();
+
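+        // peel coordinates off from the innermost dimension outwards: each step
+        // takes the remainder modulo the current dimension, then divides the
+        // flat index by it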
+        while (i-- > 0) {
+            coordIdx[i] = (flatIdx % mDims[i]);
+            flatIdx/=mDims[i];
         }
-        coordIdx[0] = idx % mDims[0];
         return coordIdx;
     }
 
@@ -497,7 +493,7 @@ class Tensor : public Data,
         AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
         std::size_t flatIdx = 0;
         std::size_t i = 0;
-        for(; i < coordIdx.size() - 1; ++i){
+        for(; i < coordIdx.size() - 1; ++i) {
             AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
             flatIdx = (flatIdx + coordIdx[i]) * mDims[i + 1];
         }
@@ -513,20 +509,24 @@ class Tensor : public Data,
      * @return DimSize_t Storage index
      */
     std::size_t getStorageIdx(const std::vector<std::size_t>& coordIdx) const {
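+        // reject any coordinate that exceeds its dimension before computing the index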
+        for(std::size_t i = 0; i < coordIdx.size(); ++i) {
+            AIDGE_ASSERT(coordIdx[i] < mDims[i], "Coordinates dimensions does not fit the dimensions of the tensor");
+        }
         AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Coordinates does not match number of dimensions");
-        return std::inner_product(coordIdx.begin(), coordIdx.end(), mStrides.begin(), DimSize_t(0));
+        return std::inner_product(coordIdx.cbegin(), coordIdx.cend(), mStrides.cbegin(), DimSize_t(0));
     }
 
     /**
-     * @brief Returns a sub-tensor with one or more dimension less.
-     * For instance, t.extract({1}) on a CHW tensor will return the HW tensor
+     * @brief Returns a sub-tensor with an equal or lower number of dimensions.
+     *
+     * @note For instance, ``t.extract({1})`` on a CHW tensor will return the HW tensor
      * of channel #1.
-     * Likewise, t.extract({0, 1}) on a NCHW tensor will return the HW tensor
+     * Likewise, ``t.extract({0, 1})`` on a NCHW tensor will return the HW tensor
      * of batch #0 and channel #1.
-     * No memory copy is performed, the returned tensor does not own the memory.
-     * If the number of coordinates matches the number of dimensions, an empty
+     * @note No memory copy is performed; the returned tensor does not own the memory.
+     * @note If the number of coordinates matches the number of dimensions, a scalar
      * tensor is returned.
-     * It current tensor was contiguous, the returned tensor is garanteed to be
+     * @note If the current tensor is contiguous, the returned tensor is guaranteed to be
      * contiguous as well.
      *
      * @param coordIdx Coordinates of the sub-tensor to extract
@@ -537,6 +537,8 @@ class Tensor : public Data,
     /**
      * @brief Returns a sub-tensor at some coordinate and with some dimension.
      *
+     * @note Data contiguity of the returned Tensor is not guaranteed.
+     *
      * @param coordIdx First coordinates of the sub-tensor to extract
      * @param dims Dimensions of the sub-tensor to extract
      * @return Tensor Sub-tensor.
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 4d8e0dcd7d29b47b7a3591652c6d3002698ab29c..0d5359156bb8ff23a5b4bdaea93d30b65f8ba702 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -9,14 +9,47 @@
  *
  ********************************************************************************/
 
-#include <vector>
+#include "aidge/data/Tensor.hpp"
+
 #include <cstddef>
+#include <vector>
 
-#include "aidge/data/Tensor.hpp"
-#include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
+    resize(other.dims(), other.strides());
+    setDataType(other.dataType(), false); // do not convert existing data
+    if (other.hasImpl()) {
+        if (hasImpl()) {
+            copyFrom(other);
+        }
+        else {
+            // Perform a shallow copy only
+            setImpl(other.mImpl, other.mImplOffset);
+        }
+    }
+    else {
+        setImpl(nullptr);
+    }
+    return *this;
+}
 
 void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
+    // TODO: scalar Tensor not handled
+    if (dims.empty()) { // scalar
+        mDims = std::vector<DimSize_t>(0);
+        mStrides = std::vector<DimSize_t>({1});
+        mContiguous = true;
+
+        computeSize();
+        if (mImpl) {
+            mImpl->resize(mDims);
+        }
+        return;
+    }
+
     bool checkContiguous = true;
     if (strides.empty()) {
         strides.resize(dims.size());
@@ -31,7 +64,7 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vecto
         AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
     }
 
-    if (mImpl.use_count() > 1) {
+    if (mImpl && mImpl.use_count() > 1) {
         // Here we could also create a new storage for this tensor in this case
         // But, is it more likely that the user really wants this, or that he did a mistake?
         AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
@@ -43,6 +76,11 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vecto
         mContiguous = true;
         if (checkContiguous) {
             std::size_t expectedStride = 1;
+            // std::size_t i = dims.size();
+            // while ((i-- > 0) && (strides[i] == expectedStride)) {
+            //     mContiguous&= (strides[i] == expectedStride);
+            //     expectedStride*= dims[i];
+            // }
             for (std::size_t i = dims.size()-1; i > 0; --i) {
                 if (strides[i] != expectedStride) {
                     mContiguous = false;
@@ -148,26 +186,26 @@ std::string Aidge::Tensor::toString() const {
     return res;
 }
 
-Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx) const {
+Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& fixedCoord) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
-    AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
+    AIDGE_ASSERT(fixedCoord.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
 
     Tensor subTensor(mDataType);
-    subTensor.resize(std::vector<size_t>(mDims.begin() + coordIdx.size(), mDims.end()),
-        std::vector<size_t>(mStrides.begin() + coordIdx.size(), mStrides.end()));
+    subTensor.resize(std::vector<size_t>(mDims.cbegin() + fixedCoord.size(), mDims.cend()),
+        std::vector<size_t>(mStrides.cbegin() + fixedCoord.size(), mStrides.cend()));
     subTensor.setBackend(mImpl->backend(), mImpl->device().second);
-    subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx));
+    subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(fixedCoord));
     return subTensor;
 }
 
-Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx, const std::vector<std::size_t>& dims) const {
+Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& startCoord, const std::vector<std::size_t>& dims) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
-    AIDGE_ASSERT(coordIdx.size() == mDims.size(), "Coordinates does not match number of dimensions");
+    AIDGE_ASSERT(startCoord.size() == mDims.size(), "Coordinates does not match number of dimensions");
 
     Tensor subTensor(mDataType);
     subTensor.resize(dims, mStrides);
     subTensor.setBackend(mImpl->backend(), mImpl->device().second);
-    subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(coordIdx));
+    subTensor.setImpl(mImpl, mImplOffset + getStorageIdx(startCoord));
     return subTensor;
 }
 
@@ -181,12 +219,12 @@ void Aidge::Tensor::makeContiguous() {
         // Create a new storage that will be contiguous
         std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
         // Copy elements from old to new storage
-        size_t idx = 0;
+        std::size_t idx = 0;
         while (idx < mSize) {
-            const size_t storageIdx = getStorageIdx(getCoord(idx));
+            const std::size_t storageIdx = getStorageIdx(getCoord(idx));
 
             // Determine the size of the contiguous chunk
-            size_t copySize = 1;
+            std::size_t copySize = 1;
             while (idx + copySize < mSize &&
                 getStorageIdx(getCoord(idx + copySize)) == storageIdx + copySize)
             {
@@ -391,3 +429,10 @@ const Aidge::Tensor& Aidge::Tensor::ref(std::shared_ptr<Tensor>& fallback, const
         return *fallback;
     }
 }
+
+std::set<std::string> Aidge::Tensor::getAvailableBackends() {
+    std::set<std::string> backendsList;
+    for(const auto& tupleKey : Registrar<Tensor>::getKeys())
+        backendsList.insert(std::get<0>(tupleKey));
+    return backendsList;
+}
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 6c827f236167c8bce4fd5a39c392f00ac8fe6649..2975538bc3271f4dbf6faea920be3a05452a0859 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -451,16 +451,15 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::ve
         this->generateScheduling(verbose);
     }
 
-    std::map<std::shared_ptr<Node>, std::string> namePtrTable;
-    if (verbose) namePtrTable = mGraphView->getRankedNodesName("{0} ({1}#{3})");
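+    // the ranked name table is needed both for verbose output and for the progress bar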
+    const auto namePtrTable = mGraphView->getRankedNodesName("{0} ({1}#{3})");
 
     size_t cpt = 0;
     for (const auto& runnable : mStaticSchedule.at(mStaticScheduleStep)) {
         if (verbose)
-            fmt::print("run: {}\n", namePtrTable[runnable]);
+            fmt::print("run: {}\n", namePtrTable.at(runnable));
         else
             drawProgressBar(static_cast<float>(cpt) / static_cast<float>(mStaticSchedule.size()), 50,
-                            (std::string("running ") + namePtrTable[runnable]));
+                            (std::string("running ") + namePtrTable.at(runnable)));
         const auto tStart = std::chrono::high_resolution_clock::now();
         runnable->forward();
         const auto tEnd = std::chrono::high_resolution_clock::now();
diff --git a/unit_tests/backend/Test_TensorImpl.cpp b/unit_tests/backend/Test_TensorImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..43e25092a0f502698bbff7b0142969154f2cb0b0
--- /dev/null
+++ b/unit_tests/backend/Test_TensorImpl.cpp
@@ -0,0 +1,61 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <array>
+#include <cstddef>
+#include <cstdint>  //std::uint16_t
+#include <memory>   // std::shared_ptr, std::make_shared
+#include <random>
+#include <vector>
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[backend/cpu/data] Tensor", "[TensorImpl]") {
+    Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+
+    SECTION("Access to array") {
+        x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+        REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
+        REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
+    }
+}
+
+TEST_CASE("Tensor fill", "[TensorImpl][fill]") {
+  SECTION("Instantiate batches independantly") {
+    // initialization with 0s
+    std::shared_ptr<Tensor> concatenatedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{});
+    //concatenatedTensor->print();
+
+    std::shared_ptr<Tensor> myTensor1 = std::make_shared<Tensor>(Array1D<int, 5>{{1,2,3,4,5}});
+    std::shared_ptr<Tensor> myTensor2 = std::make_shared<Tensor>(Array1D<int, 5>{{6,7,8,9,10}});
+    std::shared_ptr<Tensor> myTensor3 = std::make_shared<Tensor>(Array1D<int, 5>{{11,12,13,14,15}});
+
+    // use copy function from implementation
+    concatenatedTensor->getImpl()->copy(myTensor1->getImpl()->rawPtr(), 5, 0);
+    concatenatedTensor->getImpl()->copy(myTensor2->getImpl()->rawPtr(), 5, 5);
+    concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 10);
+    // concatenatedTensor->print();
+
+    std::shared_ptr<Tensor> expectedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{
+      {{1,2,3,4,5},
+      {6,7,8,9,10},
+      {11,12,13,14,15}}
+    });
+    // expectedTensor->print();
+
+    REQUIRE(*concatenatedTensor == *expectedTensor);
+  }
+}
diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..655fd725e9d7d913d24c6552571ae3b91e3605b4
--- /dev/null
+++ b/unit_tests/data/Test_Tensor.cpp
@@ -0,0 +1,412 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <algorithm>   // std::min, std::max
+#include <array>
+#include <cstddef>     // std::size_t
+#include <cstdint>     // std::uint8_t, std::uint16_t, std::int32_t
+#include <numeric>     // std::accumulate, std::inner_product
+#include <functional>  // std::multiplies
+#include <memory>      // std::make_shared, std::unique_ptr
+#include <random>      // std::random_device, std::mt19937,
+                       // std::uniform_int_distribution, std::uniform_real_distribution
+#include <set>
+#include <string>
+#include <vector>
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
+    SECTION("Default constructor") {
+        Tensor T_default{};
+        REQUIRE((
+            (T_default.dataType() == DataType::Float32) &&
+            (T_default.size() == 1) &&
+            (T_default.dims() == std::vector<DimSize_t>({})) &&
+            (T_default.strides() == std::vector<DimSize_t>({1})) &&
+            (T_default.getImpl() == nullptr) &&
+            (T_default.grad() == nullptr) &&
+            (T_default.isContiguous() == true)
+        ));
+    }
+    SECTION("scalar constructor") {
+        Tensor T;
+        REQUIRE_NOTHROW(T = Tensor(std::int32_t(20)));
+        REQUIRE((
+            (T.dataType() == DataType::Int32) &&
+            (T.size() == 1) &&
+            (T.dims() == std::vector<DimSize_t>({})) &&
+            (T.strides() == std::vector<DimSize_t>({1})) &&
+            (T.getImpl() != nullptr) &&
+            (T.grad() == nullptr) &&
+            (T.isContiguous() == true)
+        ));
+    }
+    SECTION("dim constructor") {
+        const std::vector<DimSize_t> Tdims = {1,2,3,4,5,6,7};
+        Tensor T;
+        REQUIRE_NOTHROW(T = Tensor(Tdims));
+        REQUIRE((
+            (T.dataType() == DataType::Float32) &&
+            (T.size() == std::accumulate(Tdims.cbegin(), Tdims.cend(), DimSize_t(1), std::multiplies<DimSize_t>())) &&
+            (T.dims() == Tdims) &&
+            (T.strides() == std::vector<DimSize_t>({5040,2520,840,210,42,7,1})) &&
+            (T.getImpl() == nullptr) &&
+            (T.grad() == nullptr) &&
+            (T.isContiguous() == true)
+        ));
+    }
+    SECTION("TensorUtils, constructor from const arrays") {
+        Tensor T;
+        // Construction from different types and sizes
+
+        // Set an already constructed Tensor
+        REQUIRE_NOTHROW(T = Array1D<int, 2>{{1, 2}});
+        REQUIRE((
+            (T.dataType() == DataType::Int32) &&
+            (T.size() == 2) &&
+            (T.dims() == std::vector<DimSize_t>({2})) &&
+            (T.strides() == std::vector<DimSize_t>({1})) &&
+            (T.getImpl() != nullptr) &&
+            (T.grad() == nullptr) &&
+            (T.isContiguous() == true)
+        ));
+
+        // Change dims
+        REQUIRE_NOTHROW(T = Array2D<int, 2, 2>{{{1, 2}, {3, 4}}});
+        // Change data types
+        REQUIRE_NOTHROW(T = Array3D<std::uint8_t, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
+        REQUIRE((
+            (T.dataType() == DataType::UInt8) &&
+            (T.size() == 8) &&
+            (T.dims() == std::vector<DimSize_t>({2,2,2})) &&
+            (T.strides() == std::vector<DimSize_t>({4,2,1})) &&
+            (T.getImpl() != nullptr) &&
+            (T.grad() == nullptr) &&
+            (T.isContiguous() == true)
+        ));
+        REQUIRE_NOTHROW(T = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}});
+        REQUIRE_NOTHROW(T = Array3D<float, 2, 2, 2>{{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}}});
+        REQUIRE_NOTHROW(T = Array3D<double, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}});
+
+        // Change dims
+        REQUIRE_NOTHROW(T = Array4D<int, 2, 2, 2, 2>{{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},
+                                                    {{{9,10}, {11,12}}, {{13,14},{15,16}}}}});
+        REQUIRE((
+            (T.dataType() == DataType::Int32) &&
+            (T.size() == 16) &&
+            (T.dims() == std::vector<DimSize_t>({2,2,2,2})) &&
+            (T.strides() == std::vector<DimSize_t>({8,4,2,1})) &&
+            (T.getImpl() != nullptr) &&
+            (T.grad() == nullptr) &&
+            (T.isContiguous() == true)
+        ));
+    }
+    SECTION("copy constructor / copy assignment operator") {
+
+    }
+    SECTION("move constructor / move assignment operator") {
+
+    }
+    SECTION("prototype") {
+        constexpr std::uint16_t NBTRIALS = 10;
+
+        // Create random number generators
+        std::random_device rd;
+        std::mt19937 gen(rd());
+        std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+        std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+        std::uniform_real_distribution<float> valueDist(0.001f, 1.0f);
+
+        for (std::size_t trial = 0; trial < NBTRIALS; ++trial) {
+            std::vector<std::size_t> Tdims;
+            const std::size_t Tsize = nbDimsDist(gen);
+            for (std::size_t i = 0; i < Tsize; ++i) {
+                Tdims.push_back(dimsDist(gen));
+            }
+            Tensor T(Tdims);
+
+            // fill the tensor
+            std::unique_ptr<float[]> array0(new float[T.size()]);
+            for (std::size_t i = 0; i < T.size(); ++i) {
+                array0[i] = valueDist(gen);
+            }
+            T.setBackend("cpu");
+            T.getImpl()->setRawPtr(array0.get(), T.size());
+
+            Tensor Tclone;
+            REQUIRE_NOTHROW(Tclone = T.clone());
+            REQUIRE((
+                (T.dataType() == Tclone.dataType()) &&
+                (T.size() == Tclone.size()) &&
+                (T.dims() == Tclone.dims()) &&
+                (T.strides() == Tclone.strides()) &&
+                (T.getImpl() != Tclone.getImpl()) &&
+                (Tclone.grad() == nullptr) &&
+                (Tclone.isContiguous() == true)
+            ));
+            REQUIRE(Tclone == T);
+        }
+    }
+}
+
+TEST_CASE("[core/data] Tensor(getter/setter)", "[Tensor][Getter][Setter]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create random number generators
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+    std::uniform_real_distribution<float> valueDist(0.001f, 1.0f);
+
+    for (std::size_t trial = 0; trial < NBTRIALS; ++trial) {
+        std::vector<std::size_t> Tdims;
+        const std::size_t Tsize = nbDimsDist(gen);
+        for (std::size_t i = 0; i < Tsize; ++i) {
+            Tdims.push_back(dimsDist(gen));
+        }
+
+        // create Tensor
+        Tensor T(Tdims);
+        // compute stride
+        std::vector<std::size_t> Tstrides(Tdims.size(), 1);
+        std::size_t i = Tdims.size() - 1;
+        while (i-- > 0) {
+            Tstrides[i] = Tstrides[i+1]*Tdims[i+1];
+        }
+
+    /////////////////
+    // dimensions
+        // nbDims(), dims(), size()
+        REQUIRE(T.nbDims() == Tdims.size());
+
+        REQUIRE(T.dims() == Tdims);
+
+        std::size_t trueSize = std::accumulate(Tdims.cbegin(), Tdims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+        REQUIRE(T.size() == trueSize);
+
+    /////////////////
+    // implementation
+        // getImpl(), setImpl(), hasImpl()
+        REQUIRE(T.hasImpl() == false);
+        std::shared_ptr<TensorImpl_cpu<float>> tensorImpl = std::make_shared<TensorImpl_cpu<float>>(0, Tdims);
+
+        T.setImpl(tensorImpl);
+        REQUIRE(T.getImpl() == tensorImpl);
+        REQUIRE(T.hasImpl() == true);
+
+        // isContiguous(), stride(),
+        REQUIRE(T.isContiguous());
+        REQUIRE(T.strides() == Tstrides);
+
+        // fill the tensor
+        std::unique_ptr<float[]> array0(new float[T.size()]);
+        for (std::size_t i = 0; i < T.size(); ++i) {
+            array0[i] = valueDist(gen);
+        }
+        tensorImpl->setRawPtr(array0.get(), T.size());
+
+        // getCoord(), getIdx(), getStorageIdx()
+        std::vector<DimSize_t> Tdims_copy = Tdims;
+        for (auto& val : Tdims_copy) {
+            val = std::min(DimSize_t(2), std::max(DimSize_t(0), val - 1));
+        }
+        DimSize_t true_flatid = std::inner_product(Tdims_copy.cbegin(), Tdims_copy.cend(), Tstrides.cbegin(), DimSize_t(0));
+
+        REQUIRE(T.getCoord(true_flatid) == Tdims_copy);
+        REQUIRE(T.getIdx(Tdims_copy) == true_flatid);
+        REQUIRE(T.getStorageIdx(Tdims_copy) == true_flatid); // Tensor is not a view
+
+        // set(vector), set(size_t), get(vector), get(size_t), getImplOffset()
+        REQUIRE_NOTHROW(T.set<float>(Tdims_copy, 50.0f));
+        REQUIRE(T.get<float>(Tdims_copy) == 50.0f);
+
+        REQUIRE_NOTHROW(T.set<float>(true_flatid, 40.0f));
+        REQUIRE(T.get<float>(true_flatid) == 40.0f);
+        REQUIRE(T.getImplOffset() == 0);
+
+
+    //////////////
+    // backend
+        // getAvailableBackends()
+        REQUIRE(Tensor::getAvailableBackends() == std::set<std::string>({"cpu"}));
+
+        // setBackend()
+        REQUIRE_NOTHROW(T.setBackend("cpu", 0));
+
+        // setDataType(), dataType()
+        REQUIRE_NOTHROW(T.setDataType(DataType::Int16));
+        REQUIRE(T.dataType() == DataType::Int16);
+    }
+}
+TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") {
+    // extract, makeContiguous
+    // empty
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create random number generators
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+    std::uniform_real_distribution<float> valueDist(0.001f, 1.0f);
+    // zeros, resize
+    SECTION("zeros") {
+        Tensor T;
+        for (std::size_t trial = 0; trial < NBTRIALS; ++trial) {
+            std::vector<std::size_t> Tdims;
+            const std::size_t Tsize = nbDimsDist(gen);
+            for (std::size_t i = 0; i < Tsize; ++i) {
+                Tdims.push_back(dimsDist(gen));
+            }
+            T.resize(Tdims);
+
+            // fill the tensor
+            std::unique_ptr<float[]> array0(new float[T.size()]);
+            for (std::size_t i = 0; i < T.size(); ++i) {
+                array0[i] = valueDist(gen);
+            }
+            T.setBackend("cpu");
+            T.getImpl()->setRawPtr(array0.get(), T.size());
+            float* res = static_cast<float*>(T.getImpl()->hostPtr());
+            for (std::size_t i = 0; i < T.size(); ++i) {
+                REQUIRE(res[i] == array0[i]);
+            }
+
+            T.zeros();
+            res = static_cast<float*>(T.getImpl()->hostPtr());
+            for (std::size_t i = 0; i < T.size(); ++i) {
+                REQUIRE(res[i] == 0.0f);
+            }
+        }
+    }
+
+    SECTION("Tensor extract") {
+        bool equal;
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            // create Tensor
+            const std::size_t nb_dims = 3;
+            const std::size_t dim0 = dimsDist(gen) + 1; // dim0 >= 2
+            const std::size_t dim1 = dimsDist(gen) + 1;
+            const std::size_t dim2 = dimsDist(gen) + 1;
+            std::vector<std::size_t> dims = {dim0, dim1, dim2};
+            std::unique_ptr<int[]> array0(new int[dim0*dim1*dim2]);
+            for (std::size_t i = 0; i < dim0; ++i) {
+                for (std::size_t j = 0; j < dim1; ++j) {
+                    for (std::size_t k = 0; k < dim2; ++k) {
+                        array0[((i * dim1) + j)*dim2 + k] = static_cast<int>(dimsDist(gen));
+                    }
+                }
+            }
+            Tensor x{dims};
+            x.setDataType(DataType::Int32);
+            x.setBackend("cpu");
+            Tensor y;
+            Tensor y0;
+            Tensor y1;
+            Tensor y2;
+            Tensor y3;
+            x.getImpl()->setRawPtr(array0.get(), dim0*dim1*dim2);
+            REQUIRE(x.isContiguous());
+
+        ////////////////
+        // extract contiguous Tensor slice given start coordinates
+            // the whole Tensor
+            REQUIRE_NOTHROW(y0 = x.extract({}));
+            REQUIRE(y0 == x);
+            int* y0_res = static_cast<int*>(y0.getImpl()->hostPtr());
+            equal = true;
+            for (std::size_t i = 0; i < dim0*dim1*dim2; ++i) {
+                equal &= (y0_res[i] == array0[i]);
+            }
+            REQUIRE(equal);
+            REQUIRE(y0.getImpl() == x.getImpl());
+            REQUIRE(y0.isContiguous());
+
+            // Tensor - 1-D
+            REQUIRE_NOTHROW(y1 = x.extract({dim0 - 2}));
+            int* y1_res = static_cast<int*>(y1.getImpl()->hostPtr());
+            equal = true;
+            for (std::size_t i = 0; i < dim1*dim2; ++i) {
+                equal &= (y1_res[i] == array0[(dim0-2)*dim1*dim2 + i]);
+            }
+            REQUIRE(equal);
+            REQUIRE(y1.getImpl() == x.getImpl());
+            REQUIRE(y1.isContiguous());
+
+            // Tensor - 2-D
+            REQUIRE_NOTHROW(y2 = x.extract({dim0 - 2, dim1 - 2}));
+            int* y2_res = static_cast<int*>(y2.getImpl()->hostPtr());
+            equal = true;
+            for (std::size_t i = 0; i < dim2; ++i) {
+                equal &= (y2_res[i] == array0[(((dim0 - 2) * dim1) + (dim1 - 2))*dim2 + i]);
+            }
+            REQUIRE(equal);
+            REQUIRE(y2.getImpl() == x.getImpl());
+            REQUIRE(y2.isContiguous());
+
+            // Tensor - 3-D => scalar
+            REQUIRE_NOTHROW(y3 = x.extract({dim0 - 2, dim1 - 2, dim2 - 2}));
+            int* y3_res = static_cast<int*>(y3.getImpl()->hostPtr());
+            REQUIRE(y3_res[0] == array0[(((dim0 - 2) * dim1) + (dim1 - 2))*dim2 + dim2 - 2]);
+            REQUIRE(y3.getImpl() == x.getImpl());
+            REQUIRE(y3.isContiguous());
+
+            // throw an error
+            REQUIRE_THROWS(y = x.extract({0, dim1, 0}));
+
+        /////////////////
+        // extract Tensor slice given start coordinates and dimension
+            REQUIRE_NOTHROW(y = x.extract({0, 0, 1}, {dim0-1, 1, dim2-1}));
+            REQUIRE(y.getImpl() == x.getImpl()); // shared implem
+            REQUIRE(!y.isContiguous());
+
+            Tensor yClone = y.clone(); // when copying data, they are contiguous in memory
+            REQUIRE(yClone.isContiguous());
+            // int yTruth[2][1][1] =
+            REQUIRE(approxEq<int>(yClone, y, 0.0f, 0.0f));
+        }
+    }
+
+    // print, toString,
+    SECTION("Pretty printing for debug") {
+        Tensor x{};
+        // Empty Tensor
+        REQUIRE_THROWS(x.print());
+        // scalar
+        x = Tensor(42);
+        REQUIRE_NOTHROW(x.print());
+        // 1-D Tensors
+        x = Array1D<int, 1>{{1}};
+        REQUIRE_NOTHROW(x.print());
+        x = Array1D<int, 6>{{1,2,3,4,5,6}};
+        REQUIRE_NOTHROW(x.print());
+        // 2-D Tensors
+        x = Array2D<int, 3, 2>{{{1, 2}, {3, 4}, {5, 6}}};
+        REQUIRE_NOTHROW(x.print());
+        // +2-D Tensors
+        x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+        REQUIRE_NOTHROW(x.print());
+        x = Array4D<int, 2, 2, 2, 2>{{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}},{{{11, 12}, {13, 14}}, {{15, 16}, {17, 18}}}}};
+        REQUIRE_NOTHROW(x.print());
+    }
+}
+
+} // namespace Aidge
diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp
deleted file mode 100644
index e734fcd7770483dbcd9f594847ffd4297c071e68..0000000000000000000000000000000000000000
--- a/unit_tests/data/Test_TensorImpl.cpp
+++ /dev/null
@@ -1,127 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <array>
-
-#include <catch2/catch_test_macros.hpp>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/utils/TensorUtils.hpp"
-#include "aidge/backend/cpu/data/TensorImpl.hpp"
-
-using namespace Aidge;
-
-TEST_CASE("[core/data] Tensor creation") {
-  SECTION("from const array") {
-    Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
-
-    Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
-
-    Tensor xFloat =
-        Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
-
-    SECTION("Tensor features") {
-      REQUIRE(x.nbDims() == 3);
-      REQUIRE(x.dims()[0] == 2);
-      REQUIRE(x.dims()[1] == 2);
-      REQUIRE(x.dims()[2] == 2);
-      REQUIRE(x.size() == 8);
-    }
-
-    SECTION("Access to array") {
-      REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
-      REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
-    }
-
-    SECTION("get function") {
-      REQUIRE(x.get<int>({0, 0, 0}) == 1);
-      REQUIRE(x.get<int>({0, 0, 1}) == 2);
-      REQUIRE(x.get<int>({0, 1, 1}) == 4);
-      REQUIRE(x.get<int>({1, 1, 0}) == 7);
-      x.set<int>({1, 1, 1}, 36);
-      REQUIRE(x.get<int>({1, 1, 1}) == 36);
-    }
-
-    SECTION("Pretty printing for debug") { REQUIRE_NOTHROW(x.print()); }
-
-    SECTION("Tensor (in)equality") {
-      REQUIRE(x == xCopy);
-      REQUIRE_FALSE(x == xFloat);
-    }
-  }
-}
-
-TEST_CASE("Tensor fill") {
-  SECTION("Instantiate batches independantly") {
-    // initialization with 0s
-    std::shared_ptr<Tensor> concatenatedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{});
-    //concatenatedTensor->print();
-
-    std::shared_ptr<Tensor> myTensor1 = std::make_shared<Tensor>(Array1D<int, 5>{{1,2,3,4,5}});
-    std::shared_ptr<Tensor> myTensor2 = std::make_shared<Tensor>(Array1D<int, 5>{{6,7,8,9,10}});
-    std::shared_ptr<Tensor> myTensor3 = std::make_shared<Tensor>(Array1D<int, 5>{{11,12,13,14,15}});
-
-    // use copy function from implementation
-    concatenatedTensor->getImpl()->copy(myTensor1->getImpl()->rawPtr(), 5, 0);
-    concatenatedTensor->getImpl()->copy(myTensor2->getImpl()->rawPtr(), 5, 5);
-    concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 10);
-    // concatenatedTensor->print();
-
-    std::shared_ptr<Tensor> expectedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{
-      {{1,2,3,4,5},
-      {6,7,8,9,10},
-      {11,12,13,14,15}}
-    });
-    // expectedTensor->print();
-
-    REQUIRE(*concatenatedTensor == *expectedTensor);
-  }
-}
-
-TEST_CASE("[core/data] Tensor methods","[Tensor]") {
-  Tensor x = Array3D<int, 2, 2, 2>{{
-    {{1, 2},
-     {3, 4}},
-    {{5, 6},
-     {7, 8}}
-  }};
-
-  Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
-
-  Tensor xFloat =
-      Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
-
-  SECTION("Tensor sharing") {
-    Tensor xCopyCtor(x);
-    REQUIRE(xCopyCtor.getImpl() == x.getImpl());
-
-    Tensor xEqOp = x;
-    REQUIRE(xEqOp.getImpl() == x.getImpl());
-
-    Tensor xCloned = x.clone();
-    REQUIRE(xCloned.getImpl() != x.getImpl());
-    REQUIRE(xCloned == x);
-  }
-
-  SECTION("Tensor extract") {
-    Tensor y = x.extract({0, 1});
-    REQUIRE(y.getImpl() == x.getImpl());
-    REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
-    REQUIRE(y.isContiguous());
-
-    Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1});
-    REQUIRE(y2.getImpl() == x.getImpl());
-    REQUIRE(!y2.isContiguous());
-    Tensor y3 = y2.clone();
-    REQUIRE(y3.isContiguous());
-    REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
-  }
-}