diff --git a/CHANGELOG b/CHANGELOG
new file mode 100644
index 0000000000000000000000000000000000000000..82e90519cc6546e5fa2c2dfa76bc32893d7cad64
--- /dev/null
+++ b/CHANGELOG
@@ -0,0 +1,3 @@
+# Version 0.1.0 (January 23, 2024)
+
+Initial release
diff --git a/aidge_backend_cpu/unit_tests/test_tensor.py b/aidge_backend_cpu/unit_tests/test_tensor.py
index 438b6acd51791a52c9e308fb1aceaefb2a45fb29..37531b43cf7755dfb760e575450b70bfa9a6ff68 100644
--- a/aidge_backend_cpu/unit_tests/test_tensor.py
+++ b/aidge_backend_cpu/unit_tests/test_tensor.py
@@ -16,7 +16,7 @@ class test_tensor(unittest.TestCase):
         self.assertTrue("cpu" in aidge_core.Tensor.get_available_backends())
 
     def test_numpy_int_to_tensor(self):
-        np_array = np.arange(9).reshape(1,1,3,3)
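+        # np.arange defaults to the platform's native integer type (int64 on
+        # most 64-bit systems), so cast explicitly to keep the Int32 check deterministic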
+        np_array = np.arange(9).reshape(1,1,3,3).astype(np.int32)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
         self.assertEqual(t.dtype(), aidge_core.DataType.Int32)
@@ -35,6 +35,16 @@ class test_tensor(unittest.TestCase):
         for i,j in zip(t.dims(), nnarray.shape):
             self.assertEqual(i,j)
 
+    def test_numpy_int64_to_tensor(self):
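+        # 64-bit integer arrays should map to the newly registered Int64 implementation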
+        np_array = np.arange(9).reshape(1,1,3,3).astype(np.int64)
+        # Numpy -> Tensor
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.DataType.Int64)
+        for i_t, i_n in zip(t, np_array.flatten()):
+            self.assertEqual(i_t, i_n)
+        for i,j in zip(t.dims(), np_array.shape):
+            self.assertEqual(i,j)
+
     def test_numpy_float_to_tensor(self):
         t = aidge_core.Tensor()
         np_array = np.random.rand(1, 1, 3, 3).astype(np.float32)
@@ -49,7 +59,7 @@ class test_tensor(unittest.TestCase):
     def test_get_set(self):
         dims = [2,2,2]
 
-        np_array = np.arange(8).reshape(dims)
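+        # cast explicitly for the same reason as above: np.arange's default
+        # integer type is platform-dependent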
+        np_array = np.arange(8).reshape(dims).astype(np.int32)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
         for i in range(8):
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 02f1ee5f87b7ba81b371898497df2f2eef4f6d4a..77a62918f9d2d5c9787fb6cb8a6531cd3fb90fe5 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -1,3 +1,14 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
 #ifndef AIDGE_CPU_DATA_TENSORIMPL_H_
 #define AIDGE_CPU_DATA_TENSORIMPL_H_
 
@@ -10,27 +21,28 @@
 #include "aidge/utils/future_std/span.hpp"
 
 namespace Aidge {
+
 template <class T>
 class TensorImpl_cpu : public TensorImpl {
-   private:
+private:
     /// Pointer to the data and its capacity
     future_std::span<T> mData;
     /// If this instance own the data, std::unique_ptr manages it
     std::unique_ptr<T[]> mDataOwner;
 
-   public:
+public:
     static constexpr const char *Backend = "cpu";
 
     TensorImpl_cpu(DeviceIdx_t device, NbElts_t length) : TensorImpl(Backend, device, length) {}
 
     bool operator==(const TensorImpl &otherImpl) const override final {
         const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
-        AIDGE_INTERNAL_ASSERT(typedOtherImpl.data().size() >= mNbElts);
+        AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts);
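+        // element-wise comparison through the generic rawPtr() accessor; the
+        // backend-specific data() span is no longer exposed (see below)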
 
         std::size_t i = 0;
         for (; i < mNbElts &&
-               mData[i] == typedOtherImpl.data()[i];
-             ++i) {
+               *(mData.data()+i) == *static_cast<const T*>(typedOtherImpl.rawPtr(i));
+               ++i) {
         }
         return i == mNbElts;
     }
@@ -39,12 +51,12 @@ class TensorImpl_cpu : public TensorImpl {
         return std::make_shared<TensorImpl_cpu<T>>(device, length);
     }
 
-    // native interface
-    const future_std::span<T>& data() const { return mData; }
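+    // element count and per-element byte size now come through the generic
+    // TensorImpl interface, replacing the backend-specific data() accessor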
+    inline std::size_t size() const noexcept override final { return mData.size(); }
+    inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
 
-    std::size_t scalarSize() const override { return sizeof(T); }
 
-    void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override {
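+    // raw element-wise copy within the same backend; copyCast() below
+    // handles conversion between data types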
+    void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
         const T* srcT = static_cast<const T *>(src);
         T* dstT = static_cast<T *>(rawPtr(offset));
 
@@ -53,94 +65,97 @@ class TensorImpl_cpu : public TensorImpl {
         std::copy(srcT, srcT + length, dstT);
     }
 
-    void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override {
+    void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final {
         if (length == 0) {
             return;
         }
 
         T* dstT = static_cast<T *>(rawPtr(offset));
         AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
-        if (srcDt == DataType::Float64) {
-            std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
-                    dstT);
-        }
-        else if (srcDt == DataType::Float32) {
-            std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
-                    dstT);
-        }
-        else if (srcDt == DataType::Float16) {
-            std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
-                    dstT);
-        }
-        else if (srcDt == DataType::Int64) {
-            std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
-                    dstT);
-        }
-        else if (srcDt == DataType::UInt64) {
-            std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
-                    dstT);
-        }
-        else if (srcDt == DataType::Int32) {
-            std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
-                    dstT);
-        }
-        else if (srcDt == DataType::UInt32) {
-            std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
-                    dstT);
-        }
-        else if (srcDt == DataType::Int16) {
-            std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
-                    dstT);
-        }
-        else if (srcDt == DataType::UInt16) {
-            std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
-                    dstT);
-        }
-        else if (srcDt == DataType::Int8) {
-            std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
-                    dstT);
-        }
-        else if (srcDt == DataType::UInt8) {
-            std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
-                    dstT);
-        }
-        else {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
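+        // dispatch on the runtime source type: each case reinterprets src at
+        // its native element type and lets std::copy convert element-wise to T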
+        switch (srcDt)
+        {
+            case DataType::Float64:
+                std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Float32:
+                std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Float16:
+                std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Int64:
+                std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::UInt64:
+                std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Int32:
+                std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::UInt32:
+                std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Int16:
+                std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::UInt16:
+                std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Int8:
+                std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::UInt8:
+                std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
+                        dstT);
+                break;
+            default:
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
+                break;
         }
     }
 
-    void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override {
+    void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
         AIDGE_ASSERT(device.first == Backend, "backend must match");
         AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
         copy(src, length, offset);
     }
 
-    void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override {
+    inline void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
         copy(src, length, offset);
     }
 
-    void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override {
+    void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
         const T* src = static_cast<const T*>(rawPtr(offset));
         AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
         std::copy(src, src + length, static_cast<T *>(dst));
     }
 
-    void *rawPtr(NbElts_t offset = 0) override {
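+    // mutable access lazily allocates the backing buffer on first use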
+    void *rawPtr(NbElts_t offset = 0) override final {
         lazyInit();
         return (mData.data() + offset);
     };
 
-    const void *rawPtr(NbElts_t offset = 0) const override {
+    const void *rawPtr(NbElts_t offset = 0) const override final {
         AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const rawPtr");
         return (mData.data() + offset);
     };
 
-    void *hostPtr(NbElts_t offset = 0) override {
+    void *hostPtr(NbElts_t offset = 0) override final {
         lazyInit();
         return (mData.data() + offset);
     };
 
-    const void *hostPtr(NbElts_t offset = 0) const override {
+    const void *hostPtr(NbElts_t offset = 0) const override final {
         AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const hostPtr");
         return (mData.data() + offset);
     };
@@ -173,6 +188,8 @@ static Registrar<Tensor> registrarTensorImpl_cpu_Float16(
         {"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
         {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int64(
+        {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<long>::create);
 }  // namespace
 }  // namespace Aidge