diff --git a/CHANGELOG b/CHANGELOG
index 82e90519cc6546e5fa2c2dfa76bc32893d7cad64..dc073620be324b16d904e0b05aec38f128c9b2e9 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,7 @@
+# Version 0.1.1 (January 29, 2024)
+
+[Add] Support for negative values in the Reshape Operator's shape attribute.
+
 # Version 0.1.0 (January 23, 2024)
 
 Initial release
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index 874e1a005a60aa567c7c0a5b470e5f36aa83291f..c541ae0e03459a0a7200795bc2d3c6b70c13be3b 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -125,6 +125,23 @@ class test_operator_binding(unittest.TestCase):
         generic_op.forward() # Increment idx
         self.assertEqual(customImpl.idx, 1)
 
+    def test_magic_meth(self):
+        myVar = 2
+        myBool = True
+        # Test dynamic attribute set
+        gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", myVar=myVar).get_operator()
+        gop.myBool = myBool
+        # Test variable set by kwargs
+        self.assertEqual(gop.myVar, myVar)
+        # Test set attr
+        self.assertEqual(gop.myBool, myBool)
+
+        # Test static attribute set
+        prod = aidge_core.Producer([1]).get_operator()
+        self.assertEqual(prod.Constant, False) # By default Constant is False
+        prod.Constant = True
+        self.assertEqual(prod.Constant, True)
+
 
 
 if __name__ == '__main__':
diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index 620beb160fb3494f156c1a4b512d386447081154..e4d2cb4faca3dda64cff6aea541c30787c23d0ad 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -39,12 +39,6 @@ class test_attributes(unittest.TestCase):
         self.assertEqual(fc_op.get_attr("OutChannels"), out_channels)
         self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
 
-    def test_matmul(self):
-        in_channels = 4
-        out_channels = 8
-        matmul_op = aidge_core.MatMul(in_channels, out_channels).get_operator()
-        self.assertEqual(matmul_op.get_attr("OutChannels"), out_channels)
-
     def test_producer_1D(self):
         dims = [5]
         producer_op = aidge_core.Producer(dims).get_operator()
diff --git a/aidge_core/unit_tests/test_recipies.py b/aidge_core/unit_tests/test_recipies.py
index 26ae544d6e05f2f9a9da371d3617f9265a037364..cc571d8e5db1beae7fbdb0047c8ae7ced3339fc9 100644
--- a/aidge_core/unit_tests/test_recipies.py
+++ b/aidge_core/unit_tests/test_recipies.py
@@ -45,9 +45,9 @@ class test_recipies(unittest.TestCase):
         self.assertTrue(all([i in old_nodes for i in graph_view.get_nodes()]))
 
     def test_fuse_matmul_add(self):
-        matmul0 = aidge_core.MatMul(1, 1, name="MatMul0")
+        matmul0 = aidge_core.MatMul(name="MatMul0")
         add0 = aidge_core.Add(2, name="Add0")
-        matmul1 = aidge_core.MatMul(1, 1, name="MatMul1")
+        matmul1 = aidge_core.MatMul(name="MatMul1")
         add1 = aidge_core.Add(2, name="Add1")
 
         graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
diff --git a/aidge_core/unit_tests/test_tensor.py b/aidge_core/unit_tests/test_tensor.py
index a214a0e354c64b515d0a7ac24d81c85e116938ca..d479c98b20534daa804f6019b63d528883c2b568 100644
--- a/aidge_core/unit_tests/test_tensor.py
+++ b/aidge_core/unit_tests/test_tensor.py
@@ -10,16 +10,16 @@ SPDX-License-Identifier: EPL-2.0
 
 import unittest
 import aidge_core
-
 from functools import reduce
+
 import numpy as np
 
+
 class test_tensor(unittest.TestCase):
-    """
+    """Test tensor binding
     """
     def setUp(self):
         pass
-
     def tearDown(self):
         pass
 
@@ -35,10 +35,60 @@ class test_tensor(unittest.TestCase):
             idx = t.get_idx(coord)
             self.assertEqual(idx, i)
 
-if __name__ == '__main__':
-    unittest.main()
+    def test_get_available_backends(self):
+        self.assertTrue("cpu" in aidge_core.Tensor.get_available_backends())
+
+    def test_numpy_int_to_tensor(self):
+        np_array = np.arange(9).reshape(1,1,3,3).astype(np.int32)
+        # Numpy -> Tensor
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.DataType.Int32)
+        for i_t, i_n in zip(t, np_array.flatten()):
+            self.assertTrue(i_t == i_n)
+        for i,j in zip(t.dims(), np_array.shape):
+            self.assertEqual(i,j)
+
+    def test_tensor_int_to_numpy(self):
+        np_array = np.arange(9).reshape(1,1,3,3)
+        # Numpy -> Tensor
+        t = aidge_core.Tensor(np_array)
+        # Tensor -> Numpy
+        nnarray = np.array(t)
+        for i_nn, i_n in zip(nnarray.flatten(), np_array.flatten()):
+            self.assertTrue(i_nn == i_n)
+        for i,j in zip(t.dims(), nnarray.shape):
+            self.assertEqual(i,j)
 
+    def test_numpy_int64_to_tensor(self):
+        np_array = np.arange(9).reshape(1,1,3,3).astype(np.int64)
+        # Numpy -> Tensor
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.DataType.Int64)
+        for i_t, i_n in zip(t, np_array.flatten()):
+            self.assertTrue(i_t == i_n)
+        for i,j in zip(t.dims(), np_array.shape):
+            self.assertEqual(i,j)
 
+    def test_numpy_float_to_tensor(self):
+        np_array = np.random.rand(1, 1, 3, 3).astype(np.float32)
+        # Numpy -> Tensor
+        t = aidge_core.Tensor(np_array)
+        self.assertEqual(t.dtype(), aidge_core.DataType.Float32)
+        for i_t, i_n in zip(t, np_array.flatten()):
+            self.assertTrue(i_t == i_n) # TODO: may need to compare with a tolerance instead of strict equality
+        for i,j in zip(t.dims(), np_array.shape):
+            self.assertEqual(i,j)
 
+    def test_get_set(self):
+        dims = [2,2,2]
 
+        np_array = np.arange(8).reshape(dims).astype(np.int32)
+        # Numpy -> Tensor
+        t = aidge_core.Tensor(np_array)
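+        # integer indexing reads and writes the flattened (1-D) storage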
+        for i in range(8):
+            self.assertEqual(t[i], i)
+            t[i] = 5
+            self.assertEqual(t[i], 5)
 
+if __name__ == '__main__':
+    unittest.main()
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index cc0979b07b07c2b95515eda09fda68a9ec4ac63e..9e0e457b49fe40b2a6e9e3ce5c5e4b77bee1d93e 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -14,10 +14,15 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/backend/TensorImpl.hpp"
+#include "aidge/backend/StimulusImpl.hpp"
+
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
-
+#include "aidge/data/Database.hpp"
+#include "aidge/data/DataProvider.hpp"
 #include "aidge/graph/Connector.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
@@ -58,6 +63,7 @@
 #include "aidge/operator/Sub.hpp"
 #include "aidge/operator/Transpose.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
+#include "aidge/stimuli/Stimulus.hpp"
 
 #include "aidge/recipies/Recipies.hpp"
 
@@ -66,7 +72,5 @@
 #include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-//#include "aidge/utilsParsing/AstNode.hpp"
-//#include "aidge/utilsParsing/ParsingToken.hpp"
 
 #endif /* AIDGE_IMPORTS_H_ */
diff --git a/include/aidge/backend/StimulusImpl.hpp b/include/aidge/backend/StimulusImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..fbdf57b1587d76160c0cb146b6fe9da6947541dc
--- /dev/null
+++ b/include/aidge/backend/StimulusImpl.hpp
@@ -0,0 +1,32 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_STIMULUSIMPL_H_
+#define AIDGE_CORE_BACKEND_STIMULUSIMPL_H_
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Base class to implement data loading functions.
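+ *
+ * A minimal sketch of a concrete implementation (TensorStimulusImpl and its
+ * stored tensor are illustrative, not part of the API):
+ * @code{.cpp}
+ * class TensorStimulusImpl : public StimulusImpl {
+ *     std::shared_ptr<Tensor> mTensor;
+ * public:
+ *     TensorStimulusImpl(const std::shared_ptr<Tensor>& t) : mTensor(t) {}
+ *     std::shared_ptr<Tensor> load() const override { return mTensor; }
+ * };
+ * @endcode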
+ */
+class StimulusImpl {
+public:
+    virtual ~StimulusImpl() noexcept = default;
+
+    virtual std::shared_ptr<Tensor> load() const = 0;
+};
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_BACKEND_STIMULUSIMPL_H_ */
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 782294a66b6c162e8a18345f2de7b9e6a560a560..545e6c672705fe16186d41f9c46cde94b3f3ab7e 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -67,7 +67,10 @@ private:
 class TensorImpl {
 public:
     TensorImpl() = delete;
-    TensorImpl(const char *backend, DeviceIdx_t device, NbElts_t length) : mBackend(backend), mDevice(device), mNbElts(length) {};
+    TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims) : mBackend(backend), mDevice(device)
+    {
+        resize(dims);
+    };
 
     /**
      * Return the (backend, device) pair for this implementation.
@@ -147,8 +150,12 @@ public:
     /**
      * Set the dimensions; the number of elements to store is recomputed as their product.
     */
-    void resize(NbElts_t length) {
-        mNbElts = length;
+    virtual void resize(std::vector<DimSize_t> dims) {
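+        // the number of elements to store is the product of all dimensions
+        // (an empty dims vector yields 1, i.e. a scalar)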
+        size_t product = 1;
+        for (size_t num : dims) {
+            product *= num;
+        }
+        mNbElts = product;
     }
 
     /**
diff --git a/include/aidge/backend/cpu/data/GetCPUPtr.h b/include/aidge/backend/cpu/data/GetCPUPtr.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3e0fd967457585e7e3719f92dde7d6d93eee903
--- /dev/null
+++ b/include/aidge/backend/cpu/data/GetCPUPtr.h
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_DATA_GETCPUPTR_H_
+#define AIDGE_CPU_DATA_GETCPUPTR_H_
+
+#include <cstddef>
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+
+namespace Aidge {
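+/**
+ * @brief Return a raw host pointer to a Tensor's data, viewed through its Data
+ * base class and shifted by an optional offset expressed in elements.
+ *
+ * Usage sketch (myTensor is an illustrative CPU-backed float Tensor):
+ * @code{.cpp}
+ * std::shared_ptr<Aidge::Data> data = myTensor;
+ * float* ptr = static_cast<float*>(Aidge::getCPUPtr(data));
+ * @endcode
+ */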
+inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data, const std::size_t offset = 0) {
+  const auto tensor = std::static_pointer_cast<Tensor>(data);
+  return tensor->getImpl()->hostPtr(tensor->getImplOffset() + offset);
+}
+} // namespace Aidge
+
+#endif // AIDGE_CPU_DATA_GETCPUPTR_H_
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..549232b2635f48b979208bb2f91b845dacef6f8b
--- /dev/null
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -0,0 +1,201 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_DATA_TENSORIMPL_H_
+#define AIDGE_CPU_DATA_TENSORIMPL_H_
+
+#include "aidge/backend/TensorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/half.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/future_std/span.hpp"
+
+namespace Aidge {
+
+template <class T>
+class TensorImpl_cpu : public TensorImpl {
+private:
+    /// Pointer to the data and its capacity
+    future_std::span<T> mData;
+    /// If this instance owns the data, std::unique_ptr manages it
+    std::unique_ptr<T[]> mDataOwner;
+
+public:
+    static constexpr const char *Backend = "cpu";
+
+    TensorImpl_cpu(DeviceIdx_t device, std::vector<DimSize_t> dims) : TensorImpl(Backend, device, dims) {}
+
+    bool operator==(const TensorImpl &otherImpl) const override final {
+        const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
+        AIDGE_INTERNAL_ASSERT(typedOtherImpl.size() >= mNbElts);
+
+        std::size_t i = 0;
+        for (; i < mNbElts &&
+               *static_cast<const T*>(rawPtr(i)) == *static_cast<const T*>(typedOtherImpl.rawPtr(i));
+               ++i) {
+        }
+        return i == mNbElts;
+    }
+
+    static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, std::vector<DimSize_t> dims) {
+        return std::make_shared<TensorImpl_cpu<T>>(device, dims);
+    }
+
+    inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
+
+    void copy(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
+        const T* srcT = static_cast<const T *>(src);
+        T* dstT = static_cast<T *>(rawPtr(offset));
+
+        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
+        AIDGE_ASSERT(dstT < srcT || dstT >= srcT + length, "overlapping copy is not supported");
+        std::copy(srcT, srcT + length, dstT);
+    }
+
+    void copyCast(const void *src, const DataType srcDt, NbElts_t length, NbElts_t offset = 0) override final {
+        if (length == 0) {
+            return;
+        }
+
+        T* dstT = static_cast<T *>(rawPtr(offset));
+        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
+        switch (srcDt)
+        {
+            case DataType::Float64:
+                std::copy(static_cast<const double*>(src), static_cast<const double*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Float32:
+                std::copy(static_cast<const float*>(src), static_cast<const float*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Float16:
+                std::copy(static_cast<const half_float::half*>(src), static_cast<const half_float::half*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Int64:
+                std::copy(static_cast<const int64_t*>(src), static_cast<const int64_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::UInt64:
+                std::copy(static_cast<const uint64_t*>(src), static_cast<const uint64_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Int32:
+                std::copy(static_cast<const int32_t*>(src), static_cast<const int32_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::UInt32:
+                std::copy(static_cast<const uint32_t*>(src), static_cast<const uint32_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Int16:
+                std::copy(static_cast<const int16_t*>(src), static_cast<const int16_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::UInt16:
+                std::copy(static_cast<const uint16_t*>(src), static_cast<const uint16_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::Int8:
+                std::copy(static_cast<const int8_t*>(src), static_cast<const int8_t*>(src) + length,
+                        dstT);
+                break;
+            case DataType::UInt8:
+                std::copy(static_cast<const uint8_t*>(src), static_cast<const uint8_t*>(src) + length,
+                        dstT);
+                break;
+            default:
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported data type.");
+                break;
+        }
+    }
+
+    void copyFromDevice(const void *src, const std::pair<std::string, DeviceIdx_t>& device, NbElts_t length, NbElts_t offset = 0) override final {
+        AIDGE_ASSERT(device.first == Backend, "backend must match");
+        AIDGE_ASSERT(device.second == 0, "device cannot be != 0 for CPU backend");
+        copy(src, length, offset);
+    }
+
+    inline void copyFromHost(const void *src, NbElts_t length, NbElts_t offset = 0) override final {
+        copy(src, length, offset);
+    }
+
+    void copyToHost(void *dst, NbElts_t length, NbElts_t offset = 0) const override final {
+        const T* src = static_cast<const T*>(rawPtr(offset));
+        AIDGE_ASSERT(length <= mData.size() || length <= mNbElts, "copy length is above capacity");
+        std::copy(src, src + length, static_cast<T *>(dst));
+    }
+
+    void *rawPtr(NbElts_t offset = 0) override final {
+        lazyInit();
+        return (mData.data() + offset);
+    };
+
+    const void *rawPtr(NbElts_t offset = 0) const override final {
+        AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const rawPtr");
+        return (mData.data() + offset);
+    };
+
+    void *hostPtr(NbElts_t offset = 0) override final {
+        lazyInit();
+        return (mData.data() + offset);
+    };
+
+    const void *hostPtr(NbElts_t offset = 0) const override final {
+        AIDGE_ASSERT(mData.size() >= mNbElts, "accessing uninitialized const hostPtr");
+        return (mData.data() + offset);
+    };
+
+    void setRawPtr(void *ptr, NbElts_t length) override final {
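+        // adopt an external, non-owned buffer; any previously owned data is released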
+        AIDGE_ASSERT(length >= mNbElts, "trying to set raw pointer of insufficient capacity");
+        mData = future_std::span<T>(static_cast<T *>(ptr), length);
+        mDataOwner.reset();
+    };
+
+    virtual ~TensorImpl_cpu() = default;
+
+private:
+    void lazyInit() {
+        if (mData.size() < mNbElts) {
+            // Need more data, a re-allocation will occur
+            AIDGE_ASSERT(mData.empty() || mDataOwner != nullptr, "trying to enlarge non-owned data");
+            mDataOwner.reset(new T[mNbElts]);
+            mData = future_std::span<T>(mDataOwner.get(), mNbElts);
+        }
+    }
+};
+
+namespace {
+static Registrar<Tensor> registrarTensorImpl_cpu_Float64(
+        {"cpu", DataType::Float64}, Aidge::TensorImpl_cpu<double>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Float32(
+        {"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Float16(
+        {"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int64(
+        {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<int64_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
+        {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int32_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int16(
+        {"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
+        {"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int8(
+        {"cpu", DataType::Int8}, Aidge::TensorImpl_cpu<int8_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt8(
+        {"cpu", DataType::UInt8}, Aidge::TensorImpl_cpu<uint8_t>::create);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_DATA_TENSORIMPL_H_ */
diff --git a/include/aidge/data/DataProvider.hpp b/include/aidge/data/DataProvider.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..5c7a1c73ce4ad4eb512a446879cb1ad9b673eb2f
--- /dev/null
+++ b/include/aidge/data/DataProvider.hpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_DATA_DATAPROVIDER_H_
+#define AIDGE_CORE_DATA_DATAPROVIDER_H_
+
+#include <cstddef>  // std::size_t
+#include <memory>   // std::shared_ptr
+#include <string>
+#include <vector>   // std::vector
+
+#include "aidge/data/Database.hpp"
+#include "aidge/data/Data.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Data Provider. Takes in a database and composes batches by fetching data from the given database.
+ * @todo Implement a drop-last-batch option. Currently the last batch may contain fewer elements than the batch size.
+ * @todo Implement readRandomBatch to compose batches from the database with a random sampling strategy, which is necessary for training.
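+ *
+ * Usage sketch (MyDatabase stands for any concrete Database subclass):
+ * @code{.cpp}
+ * MyDatabase db;
+ * DataProvider provider(db, 32);
+ * auto batch = provider.readBatch(0); // one Tensor per data modality
+ * @endcode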
+ */
+class DataProvider {
+private:
+    // Database providing the data to the DataProvider
+    const Database& mDatabase;
+
+    const std::size_t mNumberModality;
+    std::vector<std::vector<std::size_t>> mDataSizes;
+    std::vector<std::string> mDataBackends;
+    std::vector<DataType> mDataTypes;
+
+    // Desired size of the produced batches
+    const std::size_t mBatchSize;
+
+public:
+    /**
+     * @brief Constructor of Data Provider.
+     * @param database database from which to load the data.
+     * @param batchSize number of data samples per batch.
+     */
+    DataProvider(const Database& database, const std::size_t batchSize);
+
+    /**
+     * @brief Create a batch for each data modality in the database. Each returned batch contains the data in database order.
+     * @param startIndex the starting index in the database to start the batch from.
+     * @return a vector of tensors. Each tensor is a batch corresponding to one modality.
+     */
+    std::vector<std::shared_ptr<Tensor>> readBatch(const std::size_t startIndex) const;
+};
+
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_DATA_DATAPROVIDER_H_ */
diff --git a/include/aidge/data/Database.hpp b/include/aidge/data/Database.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..edd4b4639fb415dfd723aca987ae754f6d5ccc63
--- /dev/null
+++ b/include/aidge/data/Database.hpp
@@ -0,0 +1,57 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_DATA_DATABASE_H_
+#define AIDGE_CORE_DATA_DATABASE_H_
+
+#include <cstddef>
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Abstract class representing a map from a key to data.
+ * All databases should inherit from this class. All subclasses should override
+ * :cpp:function:`Database::getItem` to fetch data at a given index.
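+ *
+ * A minimal sketch of a concrete subclass (VectorDatabase and its member are
+ * illustrative, not part of the API):
+ * @code{.cpp}
+ * class VectorDatabase : public Database {
+ *     std::vector<std::shared_ptr<Tensor>> mItems;
+ * public:
+ *     std::vector<std::shared_ptr<Tensor>> getItem(const std::size_t index) const override {
+ *         return {mItems[index]}; // one modality per item
+ *     }
+ *     std::size_t getLen() const noexcept override { return mItems.size(); }
+ *     std::size_t getNbModalities() const noexcept override { return 1; }
+ * };
+ * @endcode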
+ */
+class Database {
+public:
+    Database() = default;
+    virtual ~Database() noexcept = default;
+
+    /**
+     * @brief Fetch an item of the database.
+     * @param index index of the item.
+     * @return vector of data mapped to index.
+     */
+    virtual std::vector<std::shared_ptr<Tensor>> getItem(const std::size_t index) const = 0;
+
+    /**
+     * @brief Get the number of items in the database
+     *
+     * @return std::size_t
+     */
+    virtual std::size_t getLen() const noexcept = 0;
+
+    /**
+     * @brief Get the number of modalities in one database item
+     *
+     * @return std::size_t
+     */
+    virtual std::size_t getNbModalities() const noexcept = 0;
+
+};
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_DATA_DATABASE_H_ */
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 658c0b497d9753f1bdfd42a274dbb48970cb6d6b..641c44c03b4e33f210e53f7822dd2d26c5a7d32f 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -32,7 +32,7 @@ namespace Aidge {
  * Contains a pointer to an actual contiguous implementation of data.
  */
 class Tensor : public Data,
-               public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)> {
+               public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> {
    private:
     DataType mDataType; /** enum to specify data type. */
     std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */
@@ -59,11 +59,25 @@ class Tensor : public Data,
         // ctor
     }
 
+    /**
+     * @brief Construct a new Tensor object from dimensions.
+     *
+     * @param dims dimensions of the tensor
+     * @param dataType datatype of the tensor (default = DataType::Float32)
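+     *
+     * @note No implementation is attached by this constructor: e.g.
+     * `Tensor t({2, 3});` only records the dimensions; storage is created once
+     * a backend is set with setBackend().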
+     */
+    Tensor(const std::vector<DimSize_t>& dims, DataType dataType = DataType::Float32)
+        : Data(Type),
+          mDataType(dataType),
+          mDims(dims)
+    {
+        computeSize();
+    }
+
     /**
      * @brief Construct a new Tensor object from another one (shallow copy).
      * Data memory is not copied, but shared between the new Tensor and the
      * initial one.
-     * 
+     *
      * @param otherTensor
      */
     Tensor(const Tensor&)            = default;
@@ -78,7 +92,7 @@ class Tensor : public Data,
             newTensor.makeContiguous();
         }
         else {
-            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mSize);
+            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
             newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
             newTensor.setImpl(newImpl);
         }
@@ -96,7 +110,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0}),
           mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
           mSize(SIZE_0) {
         mImpl->copyFromHost(&arr.data[0], SIZE_0);
     }
@@ -105,7 +119,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
         resize({SIZE_0});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0});
         }
         mImpl->copyFromHost(&arr.data[0], SIZE_0, mImplOffset);
         return *this;
@@ -123,7 +137,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0, SIZE_1}),
           mStrides({SIZE_1, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1})),
           mSize(SIZE_0 * SIZE_1) {
         mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
     }
@@ -132,7 +146,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
         resize({SIZE_0, SIZE_1});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1});
         }
         mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1, mImplOffset);
         return *this;
@@ -151,7 +165,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0, SIZE_1, SIZE_2}),
           mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2})),
           mSize(SIZE_0 * SIZE_1 * SIZE_2) {
         mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
     }
@@ -160,7 +174,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
         resize({SIZE_0, SIZE_1, SIZE_2});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2});
         }
         mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2, mImplOffset);
         return *this;
@@ -180,7 +194,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
           mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
           mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3) {
         mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
     }
@@ -189,7 +203,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
         resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3});
         }
         mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3, mImplOffset);
         return *this;
@@ -250,7 +264,7 @@ class Tensor : public Data,
             if (mImpl->device() != std::make_pair(name, device)) {
                 // Backend change: create new impl, copy from old to new and replace
                 // impl
-                std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mImpl->size());
+                std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
                 if (copyFrom) {
                     newImpl->copyFrom(*mImpl, mImpl->size(), mImplOffset, 0);
                 }
@@ -258,7 +272,7 @@ class Tensor : public Data,
             }
         }
         else {
-            mImpl = Registrar<Tensor>::create({name, mDataType})(device, mSize);
+            mImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
         }
     }
 
@@ -288,7 +302,7 @@ class Tensor : public Data,
      */
     void setDataType(const DataType dt, bool copyCast = true) {
         if (mImpl && (dataType() != dt)) {
-            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(mImpl->device().second, mImpl->size());
+            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(mImpl->device().second, mDims);
             if (copyCast) {
                 newImpl->copyCast(mImpl->rawPtr(mImplOffset), mDataType, mImpl->size());
             }
@@ -306,7 +320,7 @@ class Tensor : public Data,
 
     /**
      * @brief Set the Impl object
-     * 
+     *
      * @param impl New impl shared pointer
      * @param implOffset Storage offset in this new impl for this Tensor
      */
@@ -375,7 +389,7 @@ class Tensor : public Data,
      * @param dims New dimensions
      */
     template <std::array<DimSize_t, 1>::size_type DIM> // deducing std::array size_type and declaring DIM accordingly
-    void resize(const std::array<DimSize_t, DIM> &dims) {
+    inline void resize(const std::array<DimSize_t, DIM> &dims) {
         resize(std::vector<DimSize_t>(dims.begin(), dims.end()));
     }
 
@@ -390,48 +404,7 @@ class Tensor : public Data,
      * @param dims New dimensions
      * @param strides Stride of the tensor (if not specified, "nested" stride is used)
      */
-    void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>()) {
-        bool checkContiguous = true;
-        if (strides.empty()) {
-            strides.resize(dims.size());
-            size_t expectedStride = 1;
-            for (int dim = dims.size() - 1; dim >= 0; --dim) {
-                strides[dim] = expectedStride;
-                expectedStride*= dims[dim];
-            }
-            checkContiguous = false;
-        }
-        else {
-            AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
-        }
-
-        if (mImpl.use_count() > 1) {
-            // Here we could also create a new storage for this tensor in this case
-            // But, is it more likely that the user really wants this, or that he did a mistake?
-            AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
-        }
-        else {
-            mDims = dims;
-            mStrides = strides;
-
-            mContiguous = true;
-            if (checkContiguous) {
-                size_t expectedStride = 1;
-                for (int dim = dims.size() - 1; dim >= 0; --dim) {
-                    if (strides[dim] != expectedStride) {
-                        mContiguous = false;
-                        break;
-                    }
-                    expectedStride*= dims[dim];
-                }
-            }
-
-            computeSize();
-            if (mImpl) {
-                mImpl->resize(mSize);
-            }
-        }
-    }
+    void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>());
 
     /**
     * @brief Return whether the Tensor object has at least one element.
@@ -465,95 +438,7 @@ class Tensor : public Data,
         set<expectedType>(getStorageIdx(coordIdx), value);
     }
 
-
-
-    std::string toString() const {
-        AIDGE_ASSERT(mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) || (mImpl->hostPtr() != nullptr)), "tensor should have a valid host pointer");
-
-        // TODO: move lambda elsewhere?
-        auto ptrToString = [](DataType dt, void* ptr, size_t idx) {
-            switch (dt) {
-            case DataType::Float64:
-                return std::to_string(static_cast<double*>(ptr)[idx]);
-            case DataType::Float32:
-                return std::to_string(static_cast<float*>(ptr)[idx]);
-            case DataType::Float16:
-                return std::to_string(static_cast<half_float::half*>(ptr)[idx]);
-            case DataType::Int8:
-                return std::to_string(static_cast<int8_t*>(ptr)[idx]);
-            case DataType::Int16:
-                return std::to_string(static_cast<int16_t*>(ptr)[idx]);
-            case DataType::Int32:
-                return std::to_string(static_cast<int32_t*>(ptr)[idx]);
-            case DataType::Int64:
-                return std::to_string(static_cast<int64_t*>(ptr)[idx]);
-            case DataType::UInt8:
-                return std::to_string(static_cast<uint8_t*>(ptr)[idx]);
-            case DataType::UInt16:
-                return std::to_string(static_cast<uint16_t*>(ptr)[idx]);
-            case DataType::UInt32:
-                return std::to_string(static_cast<uint32_t*>(ptr)[idx]);
-            case DataType::UInt64:
-                return std::to_string(static_cast<uint64_t*>(ptr)[idx]);
-            default:
-                AIDGE_ASSERT(true, "unsupported type to convert to string");
-            }
-            return std::string("?");  // To make Clang happy
-        };
-
-        if (dims().empty()) { return ptrToString(mDataType, mImpl->hostPtr(), 0); }
-        std::string res;
-        std::size_t dim = 0;
-        std::size_t counter = 0;
-        if (nbDims()>=2) {
-            std::vector<std::size_t> dimVals(nbDims(), 0);
-            res += "{\n";
-            while (counter < mSize) {
-                std::string spaceString = std::string((dim+1)<<1,' ');
-                if (dim < nbDims()-2) {
-                    if (dimVals[dim] == 0) {
-                        res += spaceString + "{\n";
-                        ++dim;
-                    } else if (dimVals[dim] < static_cast<std::size_t>(dims()[dim])) {
-                        res += spaceString + "},\n" + spaceString + "{\n";
-                        ++dim;
-                    } else {
-                        res += spaceString + "}\n";
-                        dimVals[dim--] = 0;
-                        dimVals[dim]++;
-                    }
-                } else {
-                    for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) {
-                        res += spaceString + "{";
-                        for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) {
-                            res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + ",";
-                        }
-                        res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + "}";
-                        if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) {
-                            res += ",";
-                        }
-                        res += "\n";
-                    }
-                    if (dim == 0) {
-                        break;
-                    }
-                    dimVals[dim--] = 0;
-                    dimVals[dim]++;
-                }
-            }
-
-            for(int i = static_cast<int>(dim); i > 0; --i) {
-                res += std::string((dim+1)<<1,' ') + "}\n";
-            }
-        } else {
-            res += "{";
-            for (DimSize_t j = 0; j < dims()[0]; ++j) {
-                res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) + ((j < dims()[0]-1) ? "," : " ");
-            }
-        }
-        res += "}";
-        return res;
-    }
+    std::string toString() const;
 
     inline void print() const { printf("%s\n", toString().c_str()); }
 
@@ -621,7 +506,7 @@ class Tensor : public Data,
     }
 
     /**
-     * Returns a sub-tensor with one or more dimension less.
+     * @brief Returns a sub-tensor with one or more fewer dimensions.
      * For instance, t.extract({1}) on a CHW tensor will return the HW tensor
      * of channel #1.
      * Likewise, t.extract({0, 1}) on a NCHW tensor will return the HW tensor
@@ -631,15 +516,15 @@ class Tensor : public Data,
      * tensor is returned.
      * It current tensor was contiguous, the returned tensor is garanteed to be
      * contiguous as well.
-     * 
+     *
      * @param coordIdx Coordinates of the sub-tensor to extract
      * @return Tensor Sub-tensor.
     */
     Tensor extract(const std::vector<std::size_t>& coordIdx) const;
 
     /**
-     * Returns a sub-tensor at some coordinate and with some dimension.
-     * 
+     * @brief Returns a sub-tensor at some coordinate and with some dimension.
+     *
      * @param coordIdx First coordinates of the sub-tensor to extract
      * @param dims Dimensions of the sub-tensor to extract
      * @return Tensor Sub-tensor.
@@ -647,7 +532,7 @@ class Tensor : public Data,
     Tensor extract(const std::vector<std::size_t>& coordIdx, const std::vector<std::size_t>& dims) const;
 
     /**
-     * Make the tensor's storage contiguous, if it is not already the case.
+     * @brief Make the tensor's storage contiguous, if it is not already the case.
      * If not contiguous, a new memory space is allocated.
     */
     void makeContiguous();
@@ -704,7 +589,7 @@ class Tensor : public Data,
      * The data type, backend and device stay the same.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
-     * will occur if fallback has already been allocated with the right 
+     * will occur if fallback has already been allocated with the right
      * type/size/device.
      * @return Reference to either itself or to fallback.
     */
@@ -782,10 +667,10 @@ class Tensor : public Data,
     }
 
     /**
-     * Return a reference to a Tensor on desired data type and backend/device:
+     * @brief Return a reference to a Tensor on desired data type and backend/device:
      * - itself, if already with the right characteristics;
      * - the provided Tensor, overwritten with the right characteristics.
-     * NOTE: no data is copy-casted. If it was so in a previous refCastFrom() on
+     * @note no data is copy-casted. If it was so in a previous refCastFrom() on
      * the same fallback, it remains valid, otherwise, data is invalid.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
@@ -800,11 +685,11 @@ class Tensor : public Data,
     const Tensor& ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device = 0) const;
 
     /**
-     * Return a reference to a Tensor with same characteristics
+     * @brief Return a reference to a Tensor with same characteristics
      * (data type, backend/device) as targetReqs Tensor:
      * - itself, if already with the right characteristics;
      * - the provided Tensor, overwritten with the right characteristics.
-     * NOTE: no data is copy-casted. If it was so in a previous refCastFrom() on
+     * @note no data is copy-casted. If it was so in a previous refCastFrom() on
      * the same fallback, it remains valid, otherwise, data is invalid.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
@@ -819,7 +704,11 @@ class Tensor : public Data,
     }
 
 private:
-    ///\bug not protected against overflow
+    /**
+     * @brief Compute the number of elements in the Tensor.
+     * @note If dimensions are not empty, they are multiplied to get the total number
+     * of elements. Otherwise, the Tensor represents a scalar and contains a single element.
+     */
     void computeSize() {
         mSize = std::accumulate(mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
     }
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 9dccb5440e0b9dc1d102798322656791e5f845fd..a1eacf74aa5273af75209db6fe0069fa33b2d29f 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -218,7 +218,7 @@ public:
      * @brief Compute dimensions of input/output Tensors for each Operator of the
      * GraphView object's Nodes.
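+     * @param dims Optional dimensions to assign to the GraphView's data input
+     * Tensors before propagation; an empty vector (the default) presumably
+     * keeps the inputs' current dimensions.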
      */
-    void forwardDims();
+    void forwardDims(const std::vector<std::vector<DimSize_t>> dims = {});
 
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string &backend, DeviceIdx_t device = 0);
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 20082eed28825ade9d62fb5d4e081840d3bd4442..f6647f99151304d0cf083aed109cc642c9f1ecc2 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -27,25 +27,26 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class GatherAttr { Axis };
+enum class GatherAttr { Indices, GatheredShape, Axis };
 
 class Gather_Op : public OperatorTensor,
                 public Registrable<Gather_Op,
                                    std::string,
                                    std::unique_ptr<OperatorImpl>(const Gather_Op&)>,
-                public StaticAttributes<GatherAttr, int> {
+                public StaticAttributes<GatherAttr, std::vector<std::int64_t>, std::vector<DimSize_t>, std::int64_t> {
 
 public:
     static const std::string Type;
 
     Gather_Op() = delete;
 
-
-    using Attributes_ = StaticAttributes<GatherAttr, int>;
+    using Attributes_ = StaticAttributes<GatherAttr, std::vector<std::int64_t>, std::vector<DimSize_t>, std::int64_t>;
     template <GatherAttr e> using attr = typename Attributes_::template attr<e>;
-    Gather_Op(int axis)
-            : OperatorTensor(Type, 2, 0, 1),
+    Gather_Op(const std::vector<std::int64_t>& indices, const std::vector<DimSize_t>& gatheredShape, std::int64_t axis)
+            : OperatorTensor(Type, 1, 0, 1),
             Attributes_(
+                attr<GatherAttr::Indices>(indices),
+                attr<GatherAttr::GatheredShape>(gatheredShape),
                 attr<GatherAttr::Axis>(axis))
     {}
 
@@ -76,21 +77,21 @@ public:
     }
 
     static const std::vector<std::string> getInputsName(){
-        return {"data_input", "indexes"};
+        return {"data_input"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
     }
 };
 
-inline std::shared_ptr<Node> Gather(int axis = 0, const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Gather_Op>(axis), name);
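+// Illustrative only (assuming GatheredShape describes the shape of the indices):
+// Gather({0, 2}, {2}, 1) on a (N,3,4) input selects slices 0 and 2 along axis 1,
+// yielding a (N,2,4) output.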
+inline std::shared_ptr<Node> Gather(const std::vector<std::int64_t>& indices, const std::vector<DimSize_t>& gatheredShape, std::int64_t axis = 0, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Gather_Op>(indices, gatheredShape, axis), name);
 }
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"Axis"};
+const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"Indices", "GatheredShape", "Axis"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 449235712dd2867c4644ff9cbecb029778e508e2..49ff7b0bc3ead25ff5a01849be0112a8262f25cb 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -64,7 +64,7 @@ public:
     static const ComputeDimsFunc Identity;
     static const ComputeDimsFunc InputIdentity(IOIndex_t inputIdx, IOIndex_t nbOutputs);
 
-    void setComputeOutputDims(ComputeDimsFunc func) {
+    inline void setComputeOutputDims(ComputeDimsFunc func) {
         mComputeOutputDims = func;
     }
 
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 1376624a34601b716afc49a0b4f9d3ddc6735654..a011c8666bba55eb7254a8efcd432a3f680cd461 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -12,49 +12,32 @@
 #ifndef AIDGE_CORE_OPERATOR_MATMUL_H_
 #define AIDGE_CORE_OPERATOR_MATMUL_H_
 
-#include <array>
-#include <cmath>
-#include <numeric>
 #include <memory>
+#include <string>
 #include <vector>
 
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class MatMulAttr { OutChannels };
 
 class MatMul_Op : public OperatorTensor,
               public Registrable<MatMul_Op,
                                  std::string,
-                                 std::unique_ptr<OperatorImpl>(const MatMul_Op &)>,
-              public StaticAttributes<MatMulAttr, DimSize_t> {
+                                 std::unique_ptr<OperatorImpl>(const MatMul_Op &)> {
 public:
     static const std::string Type;
 
-    MatMul_Op() = delete;
-
-    using Attributes_ = StaticAttributes<MatMulAttr, DimSize_t>;
-    template <MatMulAttr e> using attr = typename Attributes_::template attr<e>;
-
-    MatMul_Op(DimSize_t out_channels)
-            : OperatorTensor(Type, 1, 1, 1),
-            Attributes_(
-                attr<MatMulAttr::OutChannels>(out_channels))
-    {}
+    MatMul_Op() : OperatorTensor(Type, 2, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    MatMul_Op(const MatMul_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
+    MatMul_Op(const MatMul_Op& op) : OperatorTensor(op)
     {
         mImpl = op.mImpl ? Registrar<MatMul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
@@ -63,50 +46,40 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MatMul_Op
      */
-    std::shared_ptr<Operator> clone() const override {
+    std::shared_ptr<Operator> clone() const override final {
         return std::make_shared<MatMul_Op>(*this);
     }
 
-
-    void computeOutputDims() override final {
-        bool associated = true;
-        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
-            }
-            associated &= !(getInput(i)->empty());
-        }
-        if (associated) {
-            // <batch, OutChannels>
-            mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<MatMulAttr::OutChannels>()});
-        }
-    }
+    /**
+     * @brief Compute dimensions for the output Tensor following the same rules as
+     * numpy.matmul.
+     * @note - Both inputs are 2-D Tensors: classic matrix multiplication
+     * @note - Either input is N-D with N > 2: it is treated as a stack of matrices residing
+     * in the last two indices and broadcast accordingly.
+     * @note - First input is 1-D: it is promoted to a matrix by prepending a 1 to its
+     * dimensions (D) -> (1,D). The prepended 1 is removed after computation.
+     * @note - Second input is 1-D: it is promoted to a matrix by appending a 1 to its
+     * dimensions (D) -> (D,1). The appended 1 is removed after computation.
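+     * @note For example, a (2,3,4) first input and a (4,5) second input
+     * broadcast to a (2,3,5) output.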
+     */
+    void computeOutputDims() override final;
 
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final {
         mImpl = Registrar<MatMul_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
 
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input", "weight"};
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input1", "data_input2"};
     }
-    static const std::vector<std::string> getOutputsName(){
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
-inline std::shared_ptr<Node> MatMul(DimSize_t inChannels, DimSize_t outChannels, const std::string& name = "") {
-    // FIXME: properly handle default w initialization in every cases
-    auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(outChannels), name);
-    addProducer(matmul, 1, {outChannels, inChannels}, "w");
-    return matmul;
+inline std::shared_ptr<Node> MatMul(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<MatMul_Op>(), name);
 }
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::MatMulAttr>::data[] = {"OutChannels"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR_MATMUL_H_ */
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index cebc2d54041bb38c6e7f3434f12b559cec3d80af..808450030bdfc176c9cbc435c76b4932586397b8 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -118,9 +118,7 @@ public:
      * @brief Set a new OperatorImpl for the Operator
      *
      */
-    void setImpl(std::shared_ptr<OperatorImpl> impl){
-        mImpl = impl;
-    }
+    inline void setImpl(std::shared_ptr<OperatorImpl> impl) { mImpl = impl; }
 
     /**
      * @brief Minimum amount of data from a specific input for one computation pass.
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 52d0118743373c23a4afe4a51d3f22adbe9e6848..5f07cddfa667e7e494defe38a5667332744c3e20 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -12,8 +12,10 @@
 #ifndef AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
 #define AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
 
+#include <algorithm>  // std::for_each
 #include <array>
 #include <cmath>
+#include <cstdint>    // std::int32_t
 #include <numeric>
 #include <vector>
 
@@ -31,18 +33,18 @@ enum class ReduceMeanAttr { Axes, KeepDims };
 template <DimIdx_t DIM>
 class ReduceMean_Op : public OperatorTensor,
                 public Registrable<ReduceMean_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
-                public StaticAttributes<ReduceMeanAttr, std::array<int, DIM>, DimSize_t> {
+                public StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t> {
 
    public:
     static const std::string Type;
 
     ReduceMean_Op() = delete;
 
-    using Attributes_ = StaticAttributes<ReduceMeanAttr, std::array<int, DIM>, DimSize_t>;
+    using Attributes_ = StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t>;
     template <ReduceMeanAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    constexpr ReduceMean_Op(const std::array<int, DIM> &axes, DimSize_t keep_dims)
+    constexpr ReduceMean_Op(const std::array<std::int32_t, DIM> &axes, DimSize_t keep_dims)
         : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<ReduceMeanAttr::Axes>(axes),
                       attr<ReduceMeanAttr::KeepDims>(keep_dims)) {}
@@ -67,29 +69,28 @@ class ReduceMean_Op : public OperatorTensor,
     }
 
     void computeOutputDims() override final {
+        if (!getInput(0)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
         if (!getInput(0)->empty()) {
-            std::vector<DimSize_t> outDims;
-            for(std::size_t d=0; d<getInput(0)->dims().size(); ++d)
-            {
-                bool reducedDim =  false;
-                for(std::size_t i=0; i<DIM; ++i)
-                {
-                    int axis_ = this->template getAttr<ReduceMeanAttr::Axes>()[i];
-                    std::size_t axis= axis_>=0? axis_: axis_ + getInput(0)->nbDims();
-                    if(axis == d)
-                    {
-                        reducedDim = true;
-                        break;
-                    }
-                }
-                if(reducedDim)
-                {
-                    if(this->template getAttr<ReduceMeanAttr::KeepDims>())
-                        outDims.push_back(1);
-                }
-                else
-                    outDims.push_back(getInput(0)->dims()[d]);
+            // make Axes attribute positive
+            std::array<std::int32_t, DIM>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
+            std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
+                if (val < 0)
+                    val+=static_cast<std::int32_t>(getInput(0)->nbDims());
+            });
+            std::sort(axes.begin(), axes.end());
+
+            // build output dimensions
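+            // e.g. input dims (2,3,4) with axes {1}: (2,1,4) if KeepDims, (2,4) otherwise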
+            std::vector<DimSize_t> outDims = getInput(0)->dims();
+            if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
+                std::for_each(axes.begin(), axes.end(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
+            }
+            else {
+                for (auto it = axes.crbegin(); it != axes.crend(); ++it)
+                    outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
             }
+
             if(outDims.size()>0)
                 mOutputs[0]->resize(outDims);
             else
@@ -111,7 +112,7 @@ class ReduceMean_Op : public OperatorTensor,
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> ReduceMean(const std::array<int, DIM> &axes,
+inline std::shared_ptr<Node> ReduceMean(const std::array<std::int32_t, DIM> &axes,
                                         DimSize_t keep_dims=1,
                                         const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
@@ -123,7 +124,7 @@ inline std::shared_ptr<Node> ReduceMean(const std::array<int, DIM> &axes,
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> ReduceMean(
-    int const (&axes)[DIM],
+    std::int32_t const (&axes)[DIM],
     DimSize_t keep_dims = 1,
     const std::string& name = "") {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 12a7425f3339b7fbc0ae010639aacf23d97b0f5f..4a073bc525640846c28d718d09741a67d499830e 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -29,17 +29,17 @@ enum class SliceAttr { Starts, Ends, Axes };
 class Slice_Op
     : public OperatorTensor,
       public Registrable<Slice_Op, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op &)>,
-      public StaticAttributes<SliceAttr, std::vector<std::int32_t>, std::vector<std::int32_t>, std::vector<std::int32_t>> {
+      public StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int64_t>> {
 public:
     static const std::string Type;
 
     Slice_Op() = delete;
 
-    using Attributes_ = StaticAttributes<SliceAttr, std::vector<std::int32_t>, std::vector<std::int32_t>, std::vector<std::int32_t>>;
+    using Attributes_ = StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int64_t>>;
     template <SliceAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    Slice_Op(const std::vector<std::int32_t>& starts, const std::vector<std::int32_t>&  ends, const std::vector<std::int32_t>& axes)
+    Slice_Op(const std::vector<std::int64_t>& starts, const std::vector<std::int64_t>&  ends, const std::vector<std::int64_t>& axes)
         : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<SliceAttr::Starts>(starts),
                       attr<SliceAttr::Ends>(ends),
@@ -94,9 +94,9 @@ public:
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
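+ * Example (illustrative): Slice({0}, {2}, {0}) creates a Slice with Starts={0}, Ends={2}, Axes={0}.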
  */
-inline std::shared_ptr<Node> Slice(const std::vector<std::int32_t> starts,
-                                   const std::vector<std::int32_t> ends,
-                                   const std::vector<std::int32_t> axes,
+inline std::shared_ptr<Node> Slice(const std::vector<std::int64_t> starts,
+                                   const std::vector<std::int64_t> ends,
+                                   const std::vector<std::int64_t> axes,
                                    const std::string &name = "") {
     // FIXME: properly handle default w&b initialization in every cases
     return std::make_shared<Node>(std::make_shared<Slice_Op>(starts, ends, axes), name);
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index a90b7ea189cf7c063bc540adec639a70a97e2b5b..aa63ff7208dbe22cb762f0f8fe1e0ffce9ad856e 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -21,6 +21,8 @@
 
 #include "aidge/utils/Types.h"
 
+#include "aidge/data/Tensor.hpp"
+
 namespace Aidge {
 class Node;
 class GraphView;
@@ -60,11 +62,17 @@ public:
         mStaticSchedule.clear();
         mStaticScheduleStep = 0;
     }
+    /**
+     * @brief Connect the given data tensors to the data input tensors of the graphView. In case of multiple data input tensors, they are mapped to producers in the order given by the graph.
+     * 
+     * @param data data input tensors
+     */
+    void connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data);
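+    // Typical use (a minimal sketch; ``input`` is a hypothetical preloaded Tensor):
+    //   SequentialScheduler scheduler(graphView);
+    //   scheduler.connectInputs({input});
+    //   scheduler.forward();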
 
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
-    void forward(bool forwardDims = true, bool verbose = false);
+    void forward(bool forwardDims = true, bool verbose = false, std::vector<std::shared_ptr<Aidge::Tensor>> data = {});
 
     /**
      * @brief Save in a Markdown file the order of layers execution.
diff --git a/include/aidge/stimuli/Stimulus.hpp b/include/aidge/stimuli/Stimulus.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..80e7c76d4857f577f30b90588f4c3998be80bdb8
--- /dev/null
+++ b/include/aidge/stimuli/Stimulus.hpp
@@ -0,0 +1,107 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_STIMULI_STIMULUS_H_
+#define AIDGE_CORE_STIMULI_STIMULUS_H_
+
+#include <string>
+#include <memory>
+#include <tuple>
+
+#include "aidge/backend/StimulusImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+namespace Aidge {
+/**
+ * @brief Stimulus. A class wrapping a data sample. Stimulus has two modes of operation. The first mode loads a data sample from a dataPath and optionally stores it in memory. The second mode wraps a data sample that is already loaded in memory.
+ * @details In the first mode, the loading function is determined automatically from the backend and the file extension.
+ */
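+// Usage sketch (the file path is hypothetical; "opencv" must be a registered backend):
+//   Stimulus stim("images/sample.png", /*loadDataInMemory=*/true);
+//   stim.setBackend("opencv");
+//   std::shared_ptr<Tensor> data = stim.load();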
+class Stimulus : public Registrable<Stimulus, std::tuple<std::string, std::string>, std::unique_ptr<StimulusImpl>(const std::string&)> {
+private:
+    /// Stimulus data path
+    const std::string mDataPath;
+    const std::string mFileExtension;
+    bool mLoadDataInMemory;
+
+    /// Stimulus data ptr
+    std::shared_ptr<Tensor> mData;
+
+    // Implementation of the Stimulus
+    std::unique_ptr<StimulusImpl> mImpl;
+
+public:
+    Stimulus() = delete;
+
+    /**
+     * @brief Construct a new Stimulus object based on a tensor that is already loaded in memory.
+     *
+     * @param data the data tensor.
+     */
+    Stimulus(const std::shared_ptr<Tensor> data)
+    : mLoadDataInMemory(true),
+      mData(data)
+    {
+        // ctor
+    }
+
+    /**
+     * @brief Construct a new Stimulus object based on a dataPath to load the data.
+     *
+     * @param dataPath path to the data to be loaded.
+     * @param loadDataInMemory when true, keep the data in memory once loaded
+     */
+    Stimulus(const std::string& dataPath, bool loadDataInMemory = false)
+    : mDataPath(dataPath),
+      mFileExtension(dataPath.substr(dataPath.find_last_of(".") + 1)),
+      mLoadDataInMemory(loadDataInMemory)
+    {
+        AIDGE_ASSERT((dataPath.find_last_of(".") !=  std::string::npos), "Cannot find extension");
+    }
+
+    /**
+     * @brief Construct a new Stimulus object copied from another one.
+     * @param otherStimulus the Stimulus to copy.
+     */
+    Stimulus(const Stimulus& otherStimulus)
+        : mDataPath(otherStimulus.mDataPath),
+          mFileExtension(otherStimulus.mFileExtension),
+          mLoadDataInMemory(otherStimulus.mLoadDataInMemory),
+          mData(otherStimulus.mData)
+    {
+        if (otherStimulus.mImpl) {
+            mImpl = Registrar<Stimulus>::create({"opencv", mFileExtension})(mDataPath);
+        }
+    }
+
+    virtual ~Stimulus();
+
+public:
+    /**
+     * @brief Set the backend of the stimulus' associated load implementation.
+     * @details Create and initialize an implementation.
+     * @param name name of the backend.
+     */
+    inline void setBackend(const std::string &name) {
+        mImpl = Registrar<Stimulus>::create({name, mFileExtension})(mDataPath);
+    }
+
+    /**
+     * @brief Get the data tensor associated with the stimulus. The data is either loaded from the dataPath or passed as an in-memory tensor.
+     *
+     * @return std::shared_ptr<Tensor> the data tensor.
+     */
+    virtual std::shared_ptr<Tensor> load();
+};
+} // namespace Aidge
+
+#endif // AIDGE_CORE_STIMULI_STIMULUS_H_
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
index d3444000191022b575adaf1430319479daa5d4fc..927686cfd5cca910c5ffb25364ae4bc971ad18bf 100644
--- a/include/aidge/utils/Attributes.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -69,6 +69,11 @@ public:
     *  be agnostic from its return type.
     */
     virtual py::object getAttrPy(const std::string& name) const = 0;
+    /* Bindable set function, does not require any templating.
+    *  This is thanks to py::object, which allows the function to
+    *  be agnostic to the ``value`` type.
+    */
+    virtual void setAttrPy(const std::string& name, py::object&& value) = 0;
 #endif
     virtual ~Attributes() {}
 };
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 2af8f47e9420f266cc6eca21f167944c761db7ea..44c3b1f5e8df833344fa9b7fe72bdb4ef1e0ec12 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -135,7 +135,7 @@ public:
         assert(res.second && "attribute already exists");
     }
 
-    void setAttrPy(const std::string& name, py::object&& value)
+    void setAttrPy(const std::string& name, py::object&& value) override final
     {
         auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
         if (!resPy.second)
@@ -204,7 +204,7 @@ private:
     // Stores C++ attributes (copy) and Python-only attributes
     // Code should be compiled with -fvisibility=hidden
     // See https://pybind11.readthedocs.io/en/stable/faq.html:
-    // “‘SomeClass’ declared with greater visibility than the type of its 
+    // “‘SomeClass’ declared with greater visibility than the type of its
     // field ‘SomeClass::member’ [-Wattributes]”
     // This map will only be populated if Python interpreter is running
     std::map<std::string, py::object> mAttrsPy;
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index e0c0d32c80ea970cf10cabc650537eef1b73b9b0..02789bd857be16e6892cb7486003530665b67495 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -202,6 +202,22 @@ public:
     }
 
     #ifdef PYBIND
+    /**
+     * @brief Return the names of the defined attributes.
+     * This method is used to automatically retrieve the attributes in the documentation.
+     * It duplicates ``getAttrsName``, but as a static method.
+     *
+     * @return std::set<std::string>
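+     * e.g. {"Axes", "KeepDims"} for ReduceMean_Op (illustrative).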
+     */
+    static std::set<std::string> staticGetAttrsName() {
+        std::set<std::string> attrsName;
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            attrsName.insert(EnumStrings<ATTRS_ENUM>::data[i]);
+        }
+        return attrsName;
+    }
+
+
     py::object getAttrPy(const std::string& name) const override {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
             if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
@@ -212,7 +228,22 @@ public:
         }
 
         AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name.c_str());
-    };
+    }
+
+
+    void setAttrPy(const std::string& name, py::object&& value) override final {
+        for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
+                // Cannot update the attribute by reference, as that would require templating
+                // Use a dirty workaround through py::cast instead
+                auto tmpAttr = py::cast(mAttrs);
+                py::detail::accessor_policies::tuple_item::set(tmpAttr, static_cast<py::size_t>(i), value);
+                mAttrs = py::cast<std::tuple<T...>>(tmpAttr);
+                return;
+            }
+        }
+        AIDGE_THROW_OR_ABORT(py::value_error, "attribute \"{}\" not found", name.c_str());
+    }
     #endif
 
 private:
diff --git a/python_binding/data/pybind_DataProvider.cpp b/python_binding/data/pybind_DataProvider.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..dfdf188946673c4e2a7ea2dc0829312758d80f96
--- /dev/null
+++ b/python_binding/data/pybind_DataProvider.cpp
@@ -0,0 +1,22 @@
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include "aidge/data/DataProvider.hpp"
+#include "aidge/data/Database.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_DataProvider(py::module& m){
+
+    py::class_<DataProvider, std::shared_ptr<DataProvider>>(m, "DataProvider")
+          .def(py::init<Database&, std::size_t>(), py::arg("database"), py::arg("batchSize"))
+          .def("read_batch", &DataProvider::readBatch, py::arg("start_index"),
+          R"mydelimiter(
+          Return a batch of each data modality.
+
+          :param start_index: Database starting index to read the batch from
+          :type start_index: int
+          )mydelimiter");
+
+}
+}
diff --git a/python_binding/data/pybind_Database.cpp b/python_binding/data/pybind_Database.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..903e692ca3d14d6ae25f0d6f151b1b08d557d924
--- /dev/null
+++ b/python_binding/data/pybind_Database.cpp
@@ -0,0 +1,13 @@
+#include <pybind11/pybind11.h>
+#include "aidge/data/Database.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Database(py::module& m){
+
+    py::class_<Database, std::shared_ptr<Database>>(m,"Database");
+
+}
+}
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index c948b1ffd414fd1b421c9a842a16982501b5b2e0..e4b8da920114f2fff799b6414a2f8ba3b0515f6f 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -30,25 +30,27 @@ void addCtor(py::class_<Tensor,
                         Data,
                         Registrable<Tensor,
                                     std::tuple<std::string, DataType>,
-                                    std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>>& mTensor){
-    mTensor.def(py::init([]( py::array_t<T, py::array::c_style | py::array::forcecast> b) {
+                                    std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>>& mTensor){
+    mTensor.def(py::init([](
+        py::array_t<T, py::array::c_style | py::array::forcecast> b,
+        std::string backend = "cpu") {
         /* Request a buffer descriptor from Python */
         py::buffer_info info = b.request();
         Tensor* newTensor = new Tensor();
         newTensor->setDataType(NativeType<T>::type);
         const std::vector<DimSize_t> dims(info.shape.begin(), info.shape.end());
         newTensor->resize(dims);
-        // TODO : Find a better way to choose backend
+
         std::set<std::string> availableBackends = Tensor::getAvailableBackends();
-        if (availableBackends.find("cpu") != availableBackends.end()){
-            newTensor->setBackend("cpu");
+        if (availableBackends.find(backend) != availableBackends.end()){
+            newTensor->setBackend(backend);
             newTensor->getImpl()->copyFromHost(static_cast<T*>(info.ptr), newTensor->size());
         }else{
-            printf("Warning : Could not use aidge_cpu backend, verify you have `import aidge_cpu`\n");
+            AIDGE_THROW_OR_ABORT(py::value_error, "Could not find backend {}, verify you have `import aidge_backend_{}`.\n", backend.c_str(), backend.c_str());
         }
 
         return newTensor;
-    }))
+    }), py::arg("array"), py::arg("backend")="cpu")
     .def("__setitem__", (void (Tensor::*)(std::size_t, T)) &Tensor::set)
     .def("__setitem__", (void (Tensor::*)(std::vector<std::size_t>, T)) &Tensor::set)
     ;
@@ -58,16 +60,16 @@ void addCtor(py::class_<Tensor,
 void init_Tensor(py::module& m){
     py::class_<Registrable<Tensor,
                            std::tuple<std::string, DataType>,
-                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>,
+                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>,
                std::shared_ptr<Registrable<Tensor,
                                            std::tuple<std::string, DataType>,
-                                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>>>(m,"TensorRegistrable");
+                                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>>>(m,"TensorRegistrable");
 
     py::class_<Tensor, std::shared_ptr<Tensor>,
                Data,
                Registrable<Tensor,
                            std::tuple<std::string, DataType>,
-                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>> pyClassTensor
+                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>> pyClassTensor
         (m,"Tensor", py::multiple_inheritance(), py::buffer_protocol());
 
     pyClassTensor.def(py::init<>())
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 9d7229994c8a9e1e5eb1d2bab694641ce3981c4b..94dcdd7f94e2e5b959742577506a7869ca783baf 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -103,7 +103,7 @@ void init_GraphView(py::module& m) {
 
           .def("get_nodes", &GraphView::getNodes)
           .def("get_node", &GraphView::getNode, py::arg("node_name"))
-          .def("forward_dims", &GraphView::forwardDims)
+          .def("forward_dims", &GraphView::forwardDims, py::arg("dims")=std::vector<std::vector<DimSize_t>>())
           .def("compile", &GraphView::compile, py::arg("backend"), py::arg("datatype"), py::arg("device") = 0)
           .def("__call__", &GraphView::operator(), py::arg("connectors"))
           .def("set_datatype", &GraphView::setDataType, py::arg("datatype"))
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index f87cd5dd66f44535ff895f73b160fc5988e1009a..0ca01c07535f65ac1161603d32d191881eb28746 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
-  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, OperatorTensor, Attributes>(
+  py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
@@ -34,13 +34,14 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
         py::arg("kernel_dims"),
         py::arg("stride_dims"))
   .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName);
+  .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
+  .def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName);
 
   m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims) {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
 
         return AvgPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()));
     }, py::arg("kernel_dims"),
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 1da4808568df6d5a8eab559c67cd1a95555233b5..e11fc288fb9eb837c0a7b36c0a1c4024ab6c8633 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -21,12 +21,13 @@ namespace Aidge {
 
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, OperatorTensor, Attributes>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
     .def(py::init<float, float>(),
             py::arg("epsilon"),
             py::arg("momentum"))
     .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
-    .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName);
+    .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
+    .def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
 
     m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nbFeatures"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 2b7e5d6b99194e914e48dc6263d0bdcd6a4a8a2f..8cdd138b8cde2a582e9f569a17ae33811637092c 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -19,9 +19,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Concat(py::module& m) {
-    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, OperatorTensor, Attributes>(m, "ConcatOp", py::multiple_inheritance())
+    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, Attributes, OperatorTensor>(m, "ConcatOp", py::multiple_inheritance())
     .def("get_inputs_name", &Concat_Op::getInputsName)
-    .def("get_outputs_name", &Concat_Op::getOutputsName);
+    .def("get_outputs_name", &Concat_Op::getOutputsName)
+    .def("attributes_name", &Concat_Op::staticGetAttrsName);
 
     m.def("Concat", &Concat, py::arg("nbIn"), py::arg("axis"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 2200cd3fec1450011d6e0b5197f8b99b4dfeb4c3..346acc5d9d05c24e9538c3b8c5edf1f7e37d6ba8 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -24,7 +24,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
-  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, OperatorTensor, Attributes>(
+  py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("ConvOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<DimSize_t,
@@ -39,6 +39,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
         py::arg("dilation_dims"))
     .def("get_inputs_name", &Conv_Op<DIM>::getInputsName)
     .def("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
+    .def("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
     ;
 
   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
@@ -47,9 +48,9 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
                                                          const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &dilation_dims) {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
-        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
         return Conv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("in_channels"),
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 15f2c1c8acb4a1b59cfb0f35ebb78cb611647d3b..e25024e09cdd4fe234416a9aa8f0fef91a3c27fe 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
-  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, OperatorTensor, Attributes>(
+  py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const DimSize_t,
@@ -38,16 +38,17 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         py::arg("stride_dims"),
         py::arg("dilation_dims"))
   .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName);
+  .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
+  .def("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName);
 
   m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
                                                                   const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
                                                                   const std::vector<DimSize_t> &dilation_dims) {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
-        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
         return ConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("nb_channenls"),
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 606b9ae948847f98d5a1129c08db21e073311879..ad589d73d0aea94d96e62e8065b70bd517633f88 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -20,9 +20,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, OperatorTensor, Attributes>(m, "FCOp", py::multiple_inheritance())
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, Attributes, OperatorTensor>(m, "FCOp", py::multiple_inheritance())
   .def("get_inputs_name", &FC_Op::getInputsName)
-  .def("get_outputs_name", &FC_Op::getOutputsName);
+  .def("get_outputs_name", &FC_Op::getOutputsName)
+  .def("attributes_name", &FC_Op::staticGetAttrsName);
 
   m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index f9768e38fbdceef4a15cc74430bc2205bb32cb6a..f0d55e2f40bd89269c96564cea6b5a002b477b8b 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -19,10 +19,11 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Gather(py::module& m) {
-    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, OperatorTensor, Attributes>(m, "GatherOp", py::multiple_inheritance())
+    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, Attributes, OperatorTensor>(m, "GatherOp", py::multiple_inheritance())
     .def("get_inputs_name", &Gather_Op::getInputsName)
-    .def("get_outputs_name", &Gather_Op::getOutputsName);
+    .def("get_outputs_name", &Gather_Op::getOutputsName)
+    .def("attributes_name", &Gather_Op::staticGetAttrsName);
 
-    m.def("Gather", &Gather, py::arg("axis"), py::arg("name") = "");
+    m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 154fdfa64f279d8d6bb40ea7077acdb4c0fd51b9..6be4f31acde5bac14595d06570d7a3158d398db8 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -21,13 +21,36 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_GenericOperator(py::module& m) {
-    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, OperatorTensor, DynamicAttributes>(m, "GenericOperatorOp",
+    py::class_<GenericOperator_Op, std::shared_ptr<GenericOperator_Op>, DynamicAttributes, OperatorTensor>(m, "GenericOperatorOp",
                                                                                   py::multiple_inheritance())
     .def_readonly_static("identity", &GenericOperator_Op::Identity)
     .def("compute_output_dims", &GenericOperator_Op::computeOutputDims)
     .def("set_compute_output_dims", &GenericOperator_Op::setComputeOutputDims, py::arg("computation_function"));
 
-    m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"),
-          py::arg("name") = "");
+    // Wrap GenericOperator in a lambda to forward Python kwargs as dynamic attributes
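+    // e.g. from Python: GenericOperator("TestOp", 1, 0, 1, "my_op", my_attr=0.5)
+    //      stores ``my_attr`` as a dynamic attribute through setAttrPy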
+    m.def("GenericOperator",
+        []( const std::string& type,
+            IOIndex_t nbData,
+            IOIndex_t nbParam,
+            IOIndex_t nbOut,
+            const std::string& name,
+            const py::kwargs kwargs){
+            std::shared_ptr<Node> genericNode = GenericOperator(
+                type,
+                nbData,
+                nbParam,
+                nbOut,
+                name
+            );
+            if (kwargs){
+                std::shared_ptr<GenericOperator_Op> gop = std::static_pointer_cast<GenericOperator_Op>(genericNode->getOperator());
+                for (auto item : kwargs) {
+                    std::string key = py::cast<std::string>(item.first);
+                    py::object value = py::reinterpret_borrow<py::object>(item.second);
+                    gop->setAttrPy(key, std::move(value));
+                }
+            }
+            return genericNode;
+        }, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 07300633ad1fb8163d4456afd744c4eb5d7b0ed1..3e9acb831eb3334bd126d3b360f3b5aa39d83731 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -18,9 +18,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, OperatorTensor, Attributes>(m, "LeakyReLUOp", py::multiple_inheritance())
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Attributes, OperatorTensor>(m, "LeakyReLUOp", py::multiple_inheritance())
     .def("get_inputs_name", &LeakyReLU_Op::getInputsName)
-    .def("get_outputs_name", &LeakyReLU_Op::getOutputsName);
+    .def("get_outputs_name", &LeakyReLU_Op::getOutputsName)
+    .def("attributes_name", &LeakyReLU_Op::staticGetAttrsName);
 
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index 242bf2c451723677e1b9063edfc3098d4159e5a4..d0d7f28d52a9a9899b08d37a0c1a4a8720f2ae20 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -19,15 +19,11 @@
 namespace py = pybind11;
 namespace Aidge {
 
-void declare_MatMul(py::module &m) {
-  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor, Attributes>(m, "MatMulOp", py::multiple_inheritance())
+void init_MatMul(py::module &m) {
+  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
   .def("get_inputs_name", &MatMul_Op::getInputsName)
   .def("get_outputs_name", &MatMul_Op::getOutputsName);
 
-  m.def("MatMul", &MatMul, py::arg("in_channels"), py::arg("out_channels"), py::arg("name") = "");
-}
-
-void init_MatMul(py::module &m) {
-  declare_MatMul(m);
+  m.def("MatMul", &MatMul, py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 0ee3d9df80d7ea7b7be2b8d5c456d5d739506882..9c83a67e81120e2cc2674e3ceb4c8871dd6fd393 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -26,7 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
-  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, OperatorTensor, Attributes>(
+  py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
@@ -36,14 +36,15 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
         py::arg("stride_dims"),
         py::arg("ceil_mode"))
   .def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName);
+  .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
+  .def("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName);
 
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
                                                                   bool ceil_mode) {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
 
         return MaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), ceil_mode);
     }, py::arg("kernel_dims"),
diff --git a/python_binding/operator/pybind_MetaOperatorDefs.cpp b/python_binding/operator/pybind_MetaOperatorDefs.cpp
index 98db4652a50329f02e4f7cace6072ffb46c1147d..443baacf81e6228f5371a2ea1c557fa83b86a067 100644
--- a/python_binding/operator/pybind_MetaOperatorDefs.cpp
+++ b/python_binding/operator/pybind_MetaOperatorDefs.cpp
@@ -32,10 +32,10 @@ template <DimIdx_t DIM> void declare_PaddedConvOp(py::module &m) {
                                                          const std::vector<DimSize_t> &padding_dims,
                                                          const std::vector<DimSize_t> &dilation_dims)
     {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
-        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [%ld] does not match DIM [%d]", padding_dims.size(), 2*DIM);
-        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
         return PaddedConv<DIM>(in_channels, out_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("in_channels"),
@@ -55,10 +55,10 @@ template <DimIdx_t DIM> void declare_PaddedConvDepthWiseOp(py::module &m) {
                                                          const std::vector<DimSize_t> &padding_dims,
                                                          const std::vector<DimSize_t> &dilation_dims)
     {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
-        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [%ld] does not match DIM [%d]", padding_dims.size(), 2*DIM);
-        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [%ld] does not match DIM [%d]", dilation_dims.size(), DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
+        AIDGE_ASSERT(dilation_dims.size() == DIM, "dilation_dims size [{}] does not match DIM [{}]", dilation_dims.size(), DIM);
 
         return PaddedConvDepthWise<DIM>(nb_channels, to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), to_array<DIM>(dilation_dims.begin()));
     }, py::arg("nb_channels"),
@@ -76,9 +76,9 @@ template <DimIdx_t DIM> void declare_PaddedAvgPoolingOp(py::module &m) {
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &padding_dims)
     {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
-        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [%ld] does not match DIM [%d]", padding_dims.size(), 2*DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
 
         return PaddedAvgPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()));
     }, py::arg("kernel_dims"),
@@ -95,9 +95,9 @@ template <DimIdx_t DIM> void declare_PaddedMaxPoolingOp(py::module &m) {
                                                          const std::vector<DimSize_t> &padding_dims,
                                                          bool ceil_mode)
     {
-        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [%ld] does not match DIM [%d]", kernel_dims.size(), DIM);
-        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [%ld] does not match DIM [%d]", stride_dims.size(), DIM);
-        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [%ld] does not match DIM [%d]", padding_dims.size(), 2*DIM);
+        AIDGE_ASSERT(kernel_dims.size() == DIM, "kernel_dims size [{}] does not match DIM [{}]", kernel_dims.size(), DIM);
+        AIDGE_ASSERT(stride_dims.size() == DIM, "stride_dims size [{}] does not match DIM [{}]", stride_dims.size(), DIM);
+        AIDGE_ASSERT(padding_dims.size() == 2*DIM, "padding_dims size [{}] does not match DIM [{}]", padding_dims.size(), 2*DIM);
 
         return PaddedMaxPooling<DIM>(to_array<DIM>(kernel_dims.begin()), name, to_array<DIM>(stride_dims.begin()), to_array<2*DIM>(padding_dims.begin()), ceil_mode);
     }, py::arg("kernel_dims"),
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 0956d6260e50d3be2418b1cf4089df87e442e54a..69d63fe7b8d31a6fa9747df2ce4a93ec4a0f4cac 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -25,7 +25,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
-  py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Operator, Attributes>(
+  py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>(
     m, ("PadOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, 2*DIM> &,
@@ -36,13 +36,14 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
         py::arg("borderValue") = 0.0)
     .def("get_inputs_name", &Pad_Op<DIM>::getInputsName)
     .def("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
+    .def("attributes_name", &Pad_Op<DIM>::staticGetAttrsName)
     ;
 
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
                                                         const std::string& name,
                                                         const PadBorderType &borderType = PadBorderType::Constant,
                                                         double borderValue = 0.0) {
-        AIDGE_ASSERT(beginEndTuples.size() == 2*DIM, "begin_end_tuples size [%ld] does not match DIM [%d]", beginEndTuples.size(), 2*DIM);
+        AIDGE_ASSERT(beginEndTuples.size() == 2*DIM, "begin_end_tuples size [{}] does not match DIM [{}]", beginEndTuples.size(), 2*DIM);
         return Pad<DIM>(to_array<2*DIM>(beginEndTuples.begin()), name, borderType, borderValue);
     },
        py::arg("begin_end_tuples"),
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 78d9ce3489a8309c42cc90189e588a448fd9649a..3caa438d18b3919dbedcf66e4ba53b92b84a50b5 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -30,13 +30,14 @@ void declare_Producer(py::module &m) {
 
 
 void init_Producer(py::module &m) {
-    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, OperatorTensor, Attributes>(
+    py::class_<Producer_Op,  std::shared_ptr<Producer_Op>, Attributes, OperatorTensor>(
         m,
         "ProducerOp",
         py::multiple_inheritance())
     .def("dims", &Producer_Op::dims)
     .def("get_inputs_name", &Producer_Op::getInputsName)
-    .def("get_outputs_name", &Producer_Op::getOutputsName);
+    .def("get_outputs_name", &Producer_Op::getOutputsName)
+    .def("attributes_name", &Producer_Op::staticGetAttrsName);
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&, bool)>(&Producer), py::arg("tensor"), py::arg("name") = "", py::arg("constant") = false);
 
     declare_Producer<1>(m);
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index e5de98b69adde5133dde302f7306bc8a5c471eef..11e979736dcab211aa11758cb3138f9d6827cc4e 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -24,16 +24,17 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) {
-  py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, OperatorTensor, Attributes>(
+  py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("ReduceMeanOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName)
     .def("get_outputs_name", &ReduceMean_Op<DIM>::getOutputsName)
+    .def("attributes_name", &ReduceMean_Op<DIM>::staticGetAttrsName)
     ;
 
   m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), [](const std::vector<int>& axes,
                                                                 DimSize_t keepDims,
                                                                 const std::string& name) {
-        AIDGE_ASSERT(axes.size() == DIM, "axes size [%ld] does not match DIM [%d]", axes.size(), DIM);
+        AIDGE_ASSERT(axes.size() == DIM, "axes size [{}] does not match DIM [{}]", axes.size(), DIM);
 
         return ReduceMean<DIM>(to_array<DIM>(axes.begin()), keepDims, name);
     }, py::arg("axes"),
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 04e92d39971a731931397e943aba6e296a81a14d..780cffdef695b71dbc2781ba30936b3b45657cbb 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -19,9 +19,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Softmax(py::module& m) {
-    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor, Attributes>(m, "SoftmaxOp", py::multiple_inheritance())
+    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Attributes, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
     .def("get_inputs_name", &Softmax_Op::getInputsName)
-    .def("get_outputs_name", &Softmax_Op::getOutputsName);
+    .def("get_outputs_name", &Softmax_Op::getOutputsName)
+    .def("attributes_name", &Softmax_Op::staticGetAttrsName);
 
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index e92e9c2aaafe2d20220da053a2b9d799fbe8466d..f5fbaf0e75ddd81265fd17e0aeb18b54f3908627 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -25,16 +25,17 @@
 namespace py = pybind11;
 namespace Aidge {
 
-template <DimIdx_t DIM> 
+template <DimIdx_t DIM>
 void declare_Transpose(py::module &m) {
-  py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, OperatorTensor, Attributes>(
+  py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
   .def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
-  .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName);
+  .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName)
+  .def("attributes_name", &Transpose_Op<DIM>::staticGetAttrsName);
 
   m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order,
                                                                   const std::string& name) {
-        AIDGE_ASSERT(output_dims_order.size() == DIM, "output_dims_order size [%ld] does not match DIM [%d]", output_dims_order.size(), DIM);
+        AIDGE_ASSERT(output_dims_order.size() == DIM, "output_dims_order size [{}] does not match DIM [{}]", output_dims_order.size(), DIM);
         return Transpose<DIM>(to_array<DIM>(output_dims_order.begin()), name);
     }, py::arg("output_dims_order"),
        py::arg("name") = "");
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 736e7a1d62164bacb13ed12edaab760ff24e30f6..ff2ab4f390b70541251cfb505a1b510361d286a0 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -11,10 +11,15 @@
 
 #include <pybind11/pybind11.h>
 
+#include "aidge/backend/cpu/data/TensorImpl.hpp" // This include add Tensor
+
+
 namespace py = pybind11;
 
 namespace Aidge {
 void init_Data(py::module&);
+void init_Database(py::module&);
+void init_DataProvider(py::module&);
 void init_Tensor(py::module&);
 void init_OperatorImpl(py::module&);
 void init_Attributes(py::module&);
@@ -67,6 +72,8 @@ void init_TensorUtils(py::module&);
 
 void init_Aidge(py::module& m){
     init_Data(m);
+    init_Database(m);
+    init_DataProvider(m);
     init_Tensor(m);
 
     init_Node(m);
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index b801898dcd251bdf1976eba1941407965c7153b6..170aa6c271a4f08ff5ad2801b754b647fee56df6 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -13,13 +13,14 @@
 #include <pybind11/stl.h>
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/graph/GraphView.hpp"
+#include "aidge/data/Tensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 void init_Scheduler(py::module& m){
     py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>>(m, "SequentialScheduler")
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
-    .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false)
+    .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false, py::arg("data")=std::vector<Tensor>())
     .def("save_scheduling_diagram", &SequentialScheduler::saveSchedulingDiagram, py::arg("file_name"))
     .def("resetScheduling", &SequentialScheduler::resetScheduling)
     .def("generate_scheduling", &SequentialScheduler::generateScheduling, py::arg("verbose")=false)
diff --git a/python_binding/utils/pybind_Parameter.cpp b/python_binding/utils/pybind_Attributes.cpp
similarity index 79%
rename from python_binding/utils/pybind_Parameter.cpp
rename to python_binding/utils/pybind_Attributes.cpp
index 2957876f31ad0781a36905cef3a5ae88934b6a8a..bfce891176822a3b1c07b1ded0c46c9c94a43c0a 100644
--- a/python_binding/utils/pybind_Parameter.cpp
+++ b/python_binding/utils/pybind_Attributes.cpp
@@ -1,6 +1,7 @@
 #include <pybind11/pybind11.h>
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
@@ -21,11 +22,13 @@ void init_Attributes(py::module& m){
     .def("has_attr", &Attributes::hasAttr, py::arg("name"))
     .def("get_attr_type", &Attributes::getAttrType, py::arg("name"))
     .def("get_attrs_name", &Attributes::getAttrsName)
-    .def("get_attr", &Attributes::getAttrPy, py::arg("name"));
+    .def("get_attr", &Attributes::getAttrPy, py::arg("name"))
+    .def("__getattr__", &Attributes::getAttrPy, py::arg("name"))
+    .def("set_attr", &Attributes::setAttrPy, py::arg("name"), py::arg("value"))
+    .def("__setattr__", &Attributes::setAttrPy, py::arg("name"), py::arg("value"));
 
     py::class_<DynamicAttributes, std::shared_ptr<DynamicAttributes>, Attributes>(m, "DynamicAttributes")
     .def("add_attr", &DynamicAttributes::addAttrPy, py::arg("name"), py::arg("value"))
-    .def("set_attr", &DynamicAttributes::setAttrPy, py::arg("name"), py::arg("value"))
     .def("del_attr", &DynamicAttributes::delAttr, py::arg("name"));
 
     m.def("test_DynamicAttributes_binding", &test_DynamicAttributes_binding);
diff --git a/src/data/DataProvider.cpp b/src/data/DataProvider.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..dffb5745d9e324856548387069bcf1d5ff6a7b48
--- /dev/null
+++ b/src/data/DataProvider.cpp
@@ -0,0 +1,85 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Database.hpp"
+#include "aidge/data/DataProvider.hpp"
+#include "aidge/data/Tensor.hpp"
+
+
+Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::size_t batchSize)
+    : mDatabase(database),
+      mNumberModality(database.getItem(0).size()),
+      mBatchSize(batchSize)
+{
+    // Iterate over each data modality in the database
+    // Get the tensor dimensions, datatype and backend of each modality to ensure every item has the same
+    for (const auto& modality : mDatabase.getItem(0)) {
+        mDataSizes.push_back(modality->dims());
+        // assert(std::strcmp(modality->getImpl()->backend(), "cpu") == 0 && "DataProvider currently only supports cpu backend tensors");
+        // mDataBackends.push_back(modality->getImpl()->backend());
+        mDataTypes.push_back(modality->dataType());
+    }
+}
+
+std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch(const std::size_t startIndex) const
+{
+    assert(startIndex <= mDatabase.getLen() && "DataProvider readBatch: database fetch out of bounds");
+
+    // Determine the batch size (may differ for the last batch)
+    const std::size_t current_batch_size = ((startIndex + mBatchSize) > mDatabase.getLen()) ?
+                                            mDatabase.getLen()-startIndex :
+                                            mBatchSize;
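+    // e.g. getLen() == 10, mBatchSize == 4, startIndex == 8 -> current_batch_size == 2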
+
+    // Create batch tensors (dimensions, backends, datatype) for each modality
+    std::vector<std::shared_ptr<Tensor>> batchTensors;
+    auto dataBatchSize = mDataSizes;
+    for (std::size_t i = 0; i < mNumberModality; ++i) {
+        dataBatchSize[i].insert(dataBatchSize[i].begin(), current_batch_size);
+        auto batchData = std::make_shared<Tensor>();
+        batchData->resize(dataBatchSize[i]);
+        // batchData->setBackend(mDataBackends[i]);
+        batchData->setBackend("cpu");
+        batchData->setDataType(mDataTypes[i]);
+        batchTensors.push_back(batchData);
+    }
+
+    // Fetch each database item and concatenate each data modality into the batch tensors
+    for (std::size_t i = 0; i < current_batch_size; ++i){
+
+        auto dataItem = mDatabase.getItem(startIndex+i);
+        // assert same number of modalities
+        assert(dataItem.size() == mNumberModality && "DataProvider readBatch: database items have an inconsistent number of modalities.");
+
+        // Browse each modality in the database item
+        for (std::size_t j = 0; j < mNumberModality; ++j) {
+            auto dataSample = dataItem[j];
+
+            // Assert tensor sizes
+            assert(dataSample->dims() == mDataSizes[j] && "DataProvider readBatch: corrupted data size");
+
+            // Assert implementation backend
+            // assert(dataSample->getImpl()->backend() == mDataBackends[j] && "DataProvider readBatch : corrupted data backend");
+
+            // Assert DataType
+            assert(dataSample->dataType() == mDataTypes[j] && "DataProvider readBatch: corrupted data DataType");
+
+            // Concatenate into the batch tensor
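+            // Each sample occupies dataSample->size() contiguous elements, so sample i lands at element offset i*dataSample->size()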
+            batchTensors[j]->getImpl()->copy(dataSample->getImpl()->rawPtr(), dataSample->size(), i*dataSample->size());
+        }
+    }
+    return batchTensors;
+}
\ No newline at end of file
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index d45dee5639a6bc082871e1110657392fb97c15ec..4d8e0dcd7d29b47b7a3591652c6d3002698ab29c 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -9,10 +9,145 @@
  *
  ********************************************************************************/
 
+#include <vector>
+#include <cstddef>
+
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
+void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
+    bool checkContiguous = true;
+    if (strides.empty()) {
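+        // No strides provided: default to contiguous row-major strides, e.g. dims {2,3,4} -> strides {12,4,1}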
+        strides.resize(dims.size());
+        size_t expectedStride = 1;
+        for (int dim = dims.size() - 1; dim >= 0; --dim) {
+            strides[dim] = expectedStride;
+            expectedStride*= dims[dim];
+        }
+        checkContiguous = false;
+    }
+    else {
+        AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
+    }
+
+    if (mImpl.use_count() > 1) {
+        // Here we could also create a new storage for this tensor in this case
+        // But is it more likely that the user really wants this, or that they made a mistake?
+        AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
+    }
+    else {
+        mDims = dims;
+        mStrides = strides;
+
+        mContiguous = true;
+        if (checkContiguous) {
+            std::size_t expectedStride = 1;
+            for (std::size_t i = dims.size()-1; i > 0; --i) {
+                if (strides[i] != expectedStride) {
+                    mContiguous = false;
+                    break;
+                }
+                expectedStride*= dims[i];
+            }
+            mContiguous &= (strides[0] == expectedStride);
+        }
+
+        computeSize();
+        if (mImpl) {
+            mImpl->resize(mDims);
+        }
+    }
+}
+
+std::string Aidge::Tensor::toString() const {
+    AIDGE_ASSERT(mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) || (mImpl->hostPtr() != nullptr)), "tensor should have a valid host pointer");
+
+    // TODO: move lambda elsewhere?
+    auto ptrToString = [](DataType dt, void* ptr, std::size_t idx) {
+        switch (dt) {
+        case DataType::Float64:
+            return std::to_string(static_cast<double*>(ptr)[idx]);
+        case DataType::Float32:
+            return std::to_string(static_cast<float*>(ptr)[idx]);
+        case DataType::Float16:
+            return std::to_string(static_cast<half_float::half*>(ptr)[idx]);
+        case DataType::Int8:
+            return std::to_string(static_cast<int8_t*>(ptr)[idx]);
+        case DataType::Int16:
+            return std::to_string(static_cast<int16_t*>(ptr)[idx]);
+        case DataType::Int32:
+            return std::to_string(static_cast<int32_t*>(ptr)[idx]);
+        case DataType::Int64:
+            return std::to_string(static_cast<int64_t*>(ptr)[idx]);
+        case DataType::UInt8:
+            return std::to_string(static_cast<uint8_t*>(ptr)[idx]);
+        case DataType::UInt16:
+            return std::to_string(static_cast<uint16_t*>(ptr)[idx]);
+        case DataType::UInt32:
+            return std::to_string(static_cast<uint32_t*>(ptr)[idx]);
+        case DataType::UInt64:
+            return std::to_string(static_cast<uint64_t*>(ptr)[idx]);
+        default:
+            AIDGE_ASSERT(false, "unsupported type to convert to string");
+        }
+        return std::string("?");  // To make Clang happy
+    };
+
+    if (dims().empty()) { return ptrToString(mDataType, mImpl->hostPtr(), 0); }
+    std::string res;
+    std::size_t dim = 0;
+    std::size_t counter = 0;
+    if (nbDims()>=2) {
+        std::vector<std::size_t> dimVals(nbDims(), 0);
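+        // Depth-first walk over the dimensions: dimVals tracks the current index at each nesting level,
+        // and the two innermost dimensions are printed as rows of values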
+        res += "{\n";
+        while (counter < mSize) {
+            std::string spaceString = std::string((dim+1)<<1,' ');
+            if (dim < nbDims()-2) {
+                if (dimVals[dim] == 0) {
+                    res += spaceString + "{\n";
+                    ++dim;
+                } else if (dimVals[dim] < static_cast<std::size_t>(dims()[dim])) {
+                    res += spaceString + "},\n" + spaceString + "{\n";
+                    ++dim;
+                } else {
+                    res += spaceString + "}\n";
+                    dimVals[dim--] = 0;
+                    dimVals[dim]++;
+                }
+            } else {
+                for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) {
+                    res += spaceString + "{";
+                    for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) {
+                        res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + ",";
+                    }
+                    res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + "}";
+                    if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) {
+                        res += ",";
+                    }
+                    res += "\n";
+                }
+                if (dim == 0) {
+                    break;
+                }
+                dimVals[dim--] = 0;
+                dimVals[dim]++;
+            }
+        }
+
+        for(int i = static_cast<int>(dim); i > 0; --i) {
+            res += std::string((dim+1)<<1,' ') + "}\n";
+        }
+    } else {
+        res += "{";
+        for (DimSize_t j = 0; j < dims()[0]; ++j) {
+            res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) + ((j < dims()[0]-1) ? "," : " ");
+        }
+    }
+    res += "}";
+    return res;
+}
+
 Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
     AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
@@ -44,7 +179,7 @@ void Aidge::Tensor::makeContiguous() {
     // Block so that mImpl ref count is 1 for resize()
     {
         // Create a new storage that will be contiguous
-        std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mSize);
+        std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
         // Copy elements from old to new storage
         size_t idx = 0;
         while (idx < mSize) {
@@ -52,7 +187,7 @@ void Aidge::Tensor::makeContiguous() {
 
             // Determine the size of the contiguous chunk
             size_t copySize = 1;
-            while (idx + copySize < mSize && 
+            while (idx + copySize < mSize &&
                 getStorageIdx(getCoord(idx + copySize)) == storageIdx + copySize)
             {
                 ++copySize;
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index b13338160257cf9bdeb2b3a86a3f6ff70de5bc78..dd94f8cdc8bd34bdaa48d9c4669dbb8d00caf902 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -317,12 +317,20 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
     forwardDims();
 }
 
-void Aidge::GraphView::forwardDims() {
+void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims) {
     std::set<NodePtr> startNodes = inputNodes();
 
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children informations
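+    // If dims are provided, each entry is wrapped in a Tensor and bound to the matching graph input,
+    // e.g. forwardDims({{16, 3, 224, 224}}) for a graph with a single 4-D input (shape chosen for illustration)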
+    if (!dims.empty()){
+      AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of dimensions and graph inputs");
+      for (std::size_t i = 0; i < dims.size(); ++i){
+        auto tensor = std::make_shared<Tensor>(dims[i]);
+        mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
+      }
+    }
+
     for (std::shared_ptr<Node> nodePtr : getNodes()) {
         for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
             // assess if the input was not already set and is a Tensor then link it to parent output
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 30804994b6084a5a5558f106a38a6087e54471bc..b5f9d738a0280b3bacdb2ce201c8303b2b4d0a1f 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#include <cassert>
 #include <cstddef>
+#include <cstdint>
 #include <string>
 #include <vector>
 
@@ -22,18 +22,26 @@ const std::string Aidge::Gather_Op::Type = "Gather";
 
 void Aidge::Gather_Op::computeOutputDims() {
     // check inputs have been associated
-    if (!getInput(0) || !getInput(1)) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
+    if (!getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
     }
 
-    if (getInput(1)->nbDims()!=2){
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Indices input must be a 2D Tensor");
-    }
+    if (!getInput(0)->empty()) {
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        const std::vector<DimSize_t> gatheredShape = this->template getAttr<GatherAttr::GatheredShape>();
+        // TODO: check indices and gatheredShape
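+        // The axis dimension is replaced by the gathered shape,
+        // e.g. input dims {3,4,5} with Axis 1 and GatheredShape {2,2} -> output dims {3,2,2,5}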
+
+        const std::int64_t axisIdx = this->template getAttr<GatherAttr::Axis>() >= 0 ?
+                                        this->template getAttr<GatherAttr::Axis>() :
+                                        this->template getAttr<GatherAttr::Axis>() + outDims.size();
+        outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
+        if (!gatheredShape.empty())
+        {
+            outDims.insert(outDims.cbegin() + static_cast<std::size_t>(axisIdx),
+                            gatheredShape.cbegin(),
+                            gatheredShape.cend());
+        }
 
-    std::vector<DimSize_t> outDims = getInput(0)->dims();
-    std::vector<DimSize_t> indexesDims = getInput(1)->dims();
-    int axisIdx = this->template getAttr<GatherAttr::Axis>()>=0?this->template getAttr<GatherAttr::Axis>():this->template getAttr<GatherAttr::Axis>()+outDims.size();
-    outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
-    outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx), indexesDims.begin(),indexesDims.end());
-    mOutputs[0]->resize(outDims);
+        mOutputs[0]->resize(outDims);
+    }
 }
\ No newline at end of file
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 666ed3921ed1190a91935bd9f38303e23963d912..f48c7ca81d6abd1d5150f54eb7d98bf109307d33 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -9,8 +9,64 @@
  *
  ********************************************************************************/
 
+#include <algorithm>
 #include <string>
+#include <vector>
 
 #include "aidge/operator/MatMul.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
-const std::string Aidge::MatMul_Op::Type = "MatMul";
\ No newline at end of file
+const std::string Aidge::MatMul_Op::Type = "MatMul";
+
+void Aidge::MatMul_Op::computeOutputDims() {
+    if (!getInput(0) || !getInput(1)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input. Cannot compute output dimensions for MatMul Operator.");
+    }
+    if (getInput(0)->empty() && getInput(1)->empty()) {
+        // both inputs are scalar
+        mOutputs[0]->resize({});
+    }
+    else if (!getInput(0)->empty() && !getInput(1)->empty())
+    {
+        std::vector<std::size_t> dims0 = getInput(0)->dims();
+        std::vector<std::size_t> dims1 = getInput(1)->dims();
+
+        // keep second-to-last dimension of dims0
+        const bool keepDim0 = dims0.size() > 1;
+        // keep last dimension of dims1
+        const bool keepDim1 = dims1.size() > 1;
+
+        if (dims0.size() == 1) {
+            dims0.insert(dims0.cbegin(), 1);
+        }
+        if (dims1.size() == 1) {
+            dims1.push_back(1);
+        }
+        const std::size_t dims_size = std::max(dims0.size(), dims1.size());
+
+        if (dims0.size() > dims1.size()) {
+            dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1));
+        }
+        else if (dims1.size() > dims0.size()) {
+            dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
+        }
+
+        AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrix sizes.");
+
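+        // Batch dimensions broadcast numpy-style, e.g. dims0 {7,1,5,6} x dims1 {1,3,6,4} -> output {7,3,5,4}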
+        std::vector<std::size_t> outDims = std::vector<std::size_t>(dims_size-2, 1);
+        for (std::size_t i = 0; i < dims_size-2; ++i) {
+            AIDGE_ASSERT((dims0[i] == dims1[i]) || (dims0[i] == 1) || (dims1[i] == 1), "Incompatible broadcast dimension.");
+            outDims[i] = std::max(dims0[i], dims1[i]);
+        }
+
+        // use keepDim0 instead of dims0.size() because dims0 has been modified
+        if (keepDim0)
+            outDims.push_back(dims0[dims_size-2]);
+        if (keepDim1)
+            outDims.push_back(dims1[dims_size-1]);
+
+        mOutputs[0]->resize(outDims);
+    }
+}
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index b0eea3c1f9f7054021b631c85e0f80e7f8845da6..30b060cd2a58d7995a7447bd9b85b9bc0026a7f7 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -9,39 +9,50 @@
  *
  ********************************************************************************/
 
-#include <cstddef>
+#include <cstddef>    // std::size_t
+#include <cstdint>    // std::int64_t
+#include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
 
 #include "aidge/operator/Reshape.hpp"
-#include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
 
 const std::string Aidge::Reshape_Op::Type = "Reshape";
 
 void Aidge::Reshape_Op::computeOutputDims() {
-    // check inputs have been associated
+    // check input has been associated
     if (!getInput(0)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
     }
 
-    DimSize_t nbOutDims = this->template getAttr<ReshapeAttr::Shape>().size();
-    std::vector<DimSize_t> outDims;
-    std::size_t outSize = 1;
-    for(std::size_t i=0; i<nbOutDims; ++i)
-    {
-        int dimSize = this->template getAttr<ReshapeAttr::Shape>()[i];
-        if (dimSize < 1)
+    if (!getInput(0)->empty()) {
+        std::vector<DimSize_t> outDims;
+        // variables to handle a negative dimension
+        bool foundNegativeDimension = false;
+        std::size_t outSize = 1;
+        DimIdx_t negativeIndex = 0;
+
+        for(std::size_t i = 0; i < this->template getAttr<ReshapeAttr::Shape>().size(); ++i)
         {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "bad dimension value");
+            std::int64_t dimSize = this->template getAttr<ReshapeAttr::Shape>()[i];
+            if (dimSize < 0) {
+                if (foundNegativeDimension) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator.");
+                }
+                foundNegativeDimension = true;
+                dimSize = 1;
+                negativeIndex = static_cast<DimIdx_t>(i);
+            }
+            outDims.push_back(static_cast<DimSize_t>(dimSize));
+            outSize *= static_cast<DimSize_t>(dimSize);
         }
-        outDims.push_back(dimSize);
-        outSize *= dimSize;
-    }
 
-    if (getInput(0)->size() != outSize){
-        AIDGE_THROW_OR_ABORT(std::runtime_error, "Output shape must give the same size as input");
-    }
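+        // Resolve the negative dimension from the remaining size,
+        // e.g. an input of size 24 with Shape {2,-1,3} gives outSize 6, so the -1 becomes 24/6 = 4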
+        if (foundNegativeDimension) {
+            outDims[negativeIndex] = (getInput(0) -> size()) / outSize;
+        }
 
-    mOutputs[0]->resize(outDims);
+        mOutputs[0]->resize(outDims);
+    }
 }
\ No newline at end of file
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index e4b072633ceff5923a25330440bbae7290f6465d..6d2670695b2ffe9acbf09edd3e82f8549a4184f0 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -30,21 +30,23 @@ void Aidge::Slice_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
     }
 
-    DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size();
+    const DimSize_t nbAxes = this->template getAttr<SliceAttr::Axes>().size();
     std::vector<DimSize_t> outDims = getInput(0)->dims();
     for (std::size_t i = 0; i < nbAxes; ++i) {
         // For each slice operation get the params and cast them to size_t
         const std::int64_t axis_ = this->template getAttr<SliceAttr::Axes>()[i];
         const std::int64_t start_ = this->template getAttr<SliceAttr::Starts>()[i];
         const std::int64_t end_ = this->template getAttr<SliceAttr::Ends>()[i];
-        const std::size_t axis = axis_ >= 0 ? static_cast<std::size_t>(axis_) : axis_ + getInput(0)->nbDims();
-        const std::size_t start = start_ >= 0 ? static_cast<std::size_t>(start_) : start_ + getInput(0)->dims()[axis];
-        const std::size_t end = end_ >= 0 ? static_cast<std::size_t>(end_) : end_ + getInput(0)->dims()[axis];
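+        // Negative indices count from the end of the axis, e.g. dims {10} with start -5 and end -1 -> start 5, end 9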
+        const std::size_t axis = axis_ >= 0 ? static_cast<std::size_t>(axis_) : static_cast<std::size_t>(axis_) + getInput(0)->nbDims();
+        const std::size_t start = start_ >= 0 ? static_cast<std::size_t>(start_) : static_cast<std::size_t>(start_) + getInput(0)->dims()[axis];
+        const std::size_t end = end_ >= 0 ? static_cast<std::size_t>(end_) : static_cast<std::size_t>(end_) + getInput(0)->dims()[axis];
 
         const std::size_t sliceLength = end - start + 1;
         // Check if slice length is valid
         if (sliceLength > getInput(0)->dims()[axis])
+        {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "ROI of Slice operator out of bounds");
+        }
         outDims[axis] = sliceLength;
     }
     mOutputs[0]->resize(outDims);
diff --git a/src/recipies/FuseMulAdd.cpp b/src/recipies/FuseMulAdd.cpp
index 322b1d9a0632b893a912c6225ac5b13d63278f8d..85bfc408f092d9f234265db51a01eff1ab64005b 100644
--- a/src/recipies/FuseMulAdd.cpp
+++ b/src/recipies/FuseMulAdd.cpp
@@ -41,7 +41,19 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
     AIDGE_ASSERT(matmulNode->getParent(1), "No weight detected to produce the fuseMulAdd recipe.");
 
     std::shared_ptr<Node> weight = matmulNode->getParent(1)->cloneSharedOperators();
-    const DimSize_t outSize = std::dynamic_pointer_cast<MatMul_Op>(matmulNode->getOperator()) -> getAttr<DimSize_t>("OutChannels");
+    // TODO: find another way to get OutChannels for FC operator.
+    // This temporary fix assumes that one of the Add inputs is constant and has the same outChannels as the output
+    DimSize_t outSize = 0;
+    const auto& op = std::dynamic_pointer_cast<OperatorTensor>(addNode->getOperator());
+    for (size_t i = 0; i < op->nbInputs(); i++)
+    {
+        const auto& inTensor = op->getInput(i);
+        if(inTensor->nbDims() > 0) {
+            outSize = inTensor->dims()[inTensor->nbDims()-1];
+            break;
+        }
+    }
+    AIDGE_ASSERT(outSize, "Couldnt get output number of channels for FC operator.");
 
     // Instanciate FC
     //std::shared_ptr<Node> fc = FC(dim[0], false, "Fc");
diff --git a/src/recipies/HorizontalTiling.cpp b/src/recipies/HorizontalTiling.cpp
index 6cc34eba076934b884b336ce40081a855d917182..7d3fafc0a15d1b797fdfb1a2884b62d2d8d766c5 100644
--- a/src/recipies/HorizontalTiling.cpp
+++ b/src/recipies/HorizontalTiling.cpp
@@ -82,16 +82,16 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         clonedInputs[1] -> addChild(newNode, 0, 1);
         clonedInputs[2] -> addChild(newNode, 0, 2);
         // Slice for input and each parameter
-        std::vector<std::int32_t> inputDimsEnd(inputDims[0].first.size());
+        std::vector<std::int64_t> inputDimsEnd(inputDims[0].first.size());
         for (std::size_t dim = 0; dim < inputDimsEnd.size(); ++dim) {
-            inputDimsEnd[dim] = static_cast<std::int32_t>(inputDims[0].first[dim] + inputDims[0].second[dim]) - 1;
+            inputDimsEnd[dim] = static_cast<std::int64_t>(inputDims[0].first[dim] + inputDims[0].second[dim]) - 1;
         }
-        std::vector<std::int32_t> inputDimsStart(inputDims[0].first.size());
+        std::vector<std::int64_t> inputDimsStart(inputDims[0].first.size());
         for (std::size_t dim = 0; dim < inputDimsStart.size(); ++dim) {
-            inputDimsStart[dim] = static_cast<std::int32_t>(inputDims[0].first[dim]);
+            inputDimsStart[dim] = static_cast<std::int64_t>(inputDims[0].first[dim]);
         }
-        std::vector<std::int32_t> usedDims(inputDimsEnd.size());
-        std::iota(usedDims.begin(), usedDims.end(), static_cast<std::int32_t>(0));
+        std::vector<std::int64_t> usedDims(inputDimsEnd.size());
+        std::iota(usedDims.begin(), usedDims.end(), static_cast<std::int64_t>(0));
         auto slice = Slice(inputDimsStart, inputDimsEnd, usedDims, "Slice_" + std::to_string(currentFirstDims[axis]));
         slice -> addChild(newNode, 0, 0);
         newNode -> addChild(concat, 0, i);
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index fcff5b8f43440229636bc65be8100d706a74d177..6f9dea3ae0b5d27e3fdea36adc0478deeb815a05 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -276,8 +276,28 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
     }
 }
 
+void Aidge::SequentialScheduler::connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data){
+    // This version of connectInputs only binds the provided tensors to the graph's data inputs.
+    auto inputNodes = mGraphView->getOrderedInputs();
+
+    // Assert that the number of provided tensors matches the number of graph data inputs
+    assert(data.size() == inputNodes.size() && "Scheduler connectInputs error - Inconsistent number of graph inputs and inputs passed to the graph");
+
+    for (std::size_t i = 0; i < data.size(); ++i){
+        // TODO: maybe use a shallow copy instead of a deep copy
+        inputNodes[i].first->getOperator()->setInput(inputNodes[i].second, data[i]);
+    }
+}
+
+
 // TODO: handle multiple inputs/outputs
-void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
+void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::vector<std::shared_ptr<Aidge::Tensor>> data) {
+
+    // Connect the provided input data to the graph's input producers
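+    // e.g. scheduler.forward(true, false, {inputTensor}) binds a single (hypothetical) input tensor to the graph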
+    if (!data.empty()){
+        connectInputs(data);
+    }
+
     // Forward dims (if allowed)
     if (forwardDims) {mGraphView->forwardDims(); }
 
diff --git a/src/stimuli/Stimulus.cpp b/src/stimuli/Stimulus.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6a91534475f6aaff44d5a2cd4da013434a99f9bf
--- /dev/null
+++ b/src/stimuli/Stimulus.cpp
@@ -0,0 +1,30 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/stimuli/Stimulus.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+
+Aidge::Stimulus::~Stimulus() = default;
+
+std::shared_ptr<Aidge::Tensor> Aidge::Stimulus::load() {
+    AIDGE_ASSERT((mImpl != nullptr || mData != nullptr), "No load implementation and no stored data");
+
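+    // With mLoadDataInMemory set, the first call caches the loaded tensor and later calls return the cached copy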
+    if (mLoadDataInMemory){
+        if (mData == nullptr){
+            mData = mImpl->load();
+        }
+        return mData;
+    }
+    return mImpl->load();
+}
\ No newline at end of file
diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e734fcd7770483dbcd9f594847ffd4297c071e68
--- /dev/null
+++ b/unit_tests/data/Test_TensorImpl.cpp
@@ -0,0 +1,127 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <array>
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[core/data] Tensor creation") {
+  SECTION("from const array") {
+    Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+
+    Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+
+    Tensor xFloat =
+        Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
+
+    SECTION("Tensor features") {
+      REQUIRE(x.nbDims() == 3);
+      REQUIRE(x.dims()[0] == 2);
+      REQUIRE(x.dims()[1] == 2);
+      REQUIRE(x.dims()[2] == 2);
+      REQUIRE(x.size() == 8);
+    }
+
+    SECTION("Access to array") {
+      REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[0] == 1);
+      REQUIRE(static_cast<int *>(x.getImpl()->rawPtr())[7] == 8);
+    }
+
+    SECTION("get function") {
+      REQUIRE(x.get<int>({0, 0, 0}) == 1);
+      REQUIRE(x.get<int>({0, 0, 1}) == 2);
+      REQUIRE(x.get<int>({0, 1, 1}) == 4);
+      REQUIRE(x.get<int>({1, 1, 0}) == 7);
+      x.set<int>({1, 1, 1}, 36);
+      REQUIRE(x.get<int>({1, 1, 1}) == 36);
+    }
+
+    SECTION("Pretty printing for debug") { REQUIRE_NOTHROW(x.print()); }
+
+    SECTION("Tensor (in)equality") {
+      REQUIRE(x == xCopy);
+      REQUIRE_FALSE(x == xFloat);
+    }
+  }
+}
+
+TEST_CASE("Tensor fill") {
+  SECTION("Instantiate batches independantly") {
+    // initialization with 0s
+    std::shared_ptr<Tensor> concatenatedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{});
+    //concatenatedTensor->print();
+
+    std::shared_ptr<Tensor> myTensor1 = std::make_shared<Tensor>(Array1D<int, 5>{{1,2,3,4,5}});
+    std::shared_ptr<Tensor> myTensor2 = std::make_shared<Tensor>(Array1D<int, 5>{{6,7,8,9,10}});
+    std::shared_ptr<Tensor> myTensor3 = std::make_shared<Tensor>(Array1D<int, 5>{{11,12,13,14,15}});
+
+    // use copy function from implementation
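+    // each 1-D tensor holds 5 elements, so row i of the 3x5 destination starts at element offset i*5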
+    concatenatedTensor->getImpl()->copy(myTensor1->getImpl()->rawPtr(), 5, 0);
+    concatenatedTensor->getImpl()->copy(myTensor2->getImpl()->rawPtr(), 5, 5);
+    concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 10);
+    // concatenatedTensor->print();
+
+    std::shared_ptr<Tensor> expectedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{
+      {{1,2,3,4,5},
+      {6,7,8,9,10},
+      {11,12,13,14,15}}
+    });
+    // expectedTensor->print();
+
+    REQUIRE(*concatenatedTensor == *expectedTensor);
+  }
+}
+
+TEST_CASE("[core/data] Tensor methods","[Tensor]") {
+  Tensor x = Array3D<int, 2, 2, 2>{{
+    {{1, 2},
+     {3, 4}},
+    {{5, 6},
+     {7, 8}}
+  }};
+
+  Tensor xCopy = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
+
+  Tensor xFloat =
+      Array3D<float, 2, 2, 2>{{{{1., 2.}, {3., 4.}}, {{5., 6.}, {7., 8.}}}};
+
+  SECTION("Tensor sharing") {
+    Tensor xCopyCtor(x);
+    REQUIRE(xCopyCtor.getImpl() == x.getImpl());
+
+    Tensor xEqOp = x;
+    REQUIRE(xEqOp.getImpl() == x.getImpl());
+
+    Tensor xCloned = x.clone();
+    REQUIRE(xCloned.getImpl() != x.getImpl());
+    REQUIRE(xCloned == x);
+  }
+
+  SECTION("Tensor extract") {
+    Tensor y = x.extract({0, 1});
+    REQUIRE(y.getImpl() == x.getImpl());
+    REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
+    REQUIRE(y.isContiguous());
+
+    Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1});
+    REQUIRE(y2.getImpl() == x.getImpl());
+    REQUIRE(!y2.isContiguous());
+    Tensor y3 = y2.clone();
+    REQUIRE(y3.isContiguous());
+    REQUIRE(approxEq<int>(y3, Array3D<int, 2, 1, 1>{{{{4}}, {{8}}}}));
+  }
+}
diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp
index 924aac79ea8492f6ea0f2cd4d93676876c5a8331..1330a8e620ae5d49d6ef61257a587b914ffed1cd 100644
--- a/unit_tests/graphRegex/Test_GraphRegex.cpp
+++ b/unit_tests/graphRegex/Test_GraphRegex.cpp
@@ -126,9 +126,9 @@ TEST_CASE("GraphRegexUser") {
     SECTION("Applied Recipes"){
 
       // generate the original GraphView
-        auto matmul0 = MatMul(5, 5, "matmul0");
+        auto matmul0 = MatMul("matmul0");
         auto add0 = Add(2, "add0");
-        auto matmul1 = MatMul(5, 5, "matmul1");
+        auto matmul1 = MatMul("matmul1");
         auto add1 = Add(2, "add1");
 
         auto b0 = Producer({5}, "B0");
@@ -154,7 +154,7 @@ TEST_CASE("GraphRegexUser") {
 
 
         auto g = std::make_shared<GraphView>();
-        g->add({matmul0, add0, matmul1, add1, b0, b1,fl,fc});
+        g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1,fl,fc});
 
         std::shared_ptr<GraphRegex> kitchenBook = std::make_shared<GraphRegex>();
 
diff --git a/unit_tests/operator/Test_MatMul_Op.cpp b/unit_tests/operator/Test_MatMul_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6c810e675ad46cc5580bd24e57f7e7dbb84db38f
--- /dev/null
+++ b/unit_tests/operator/Test_MatMul_Op.cpp
@@ -0,0 +1,196 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutputDims]") {
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dist(1, 10);
+
+    // Create MatMul Operator
+    std::shared_ptr<Node> myMatMul = MatMul();
+    auto op = std::static_pointer_cast<OperatorTensor>(myMatMul -> getOperator());
+
+    /** @todo Special case of scalar Tensor objects.
+     * Not handled yet.
+    */
+    // SECTION("0-D / 0-D") {
+    //     std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    //     T0->resize({});
+    //     op -> associateInput(0,T0);
+
+    //     // input_1 - right
+    //     std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    //     T1->resize({});
+    //     op -> associateInput(1,T1);
+
+    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE((op->getOutput(0)->dims()).empty());
+
+    //     // input_1 - wrong
+    //     T1->resize({dist(gen)});
+
+    //     REQUIRE_THROWS(op->computeOutputDims());
+    // }
+
+    SECTION("1-D / N-D") {
+        // input_0
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        const std::size_t dim0 = dist(gen);
+        T0->resize({dim0});
+        op -> associateInput(0,T0);
+
+        std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+        op -> associateInput(1,T1);
+
+        SECTION("1-D / 1-D") {
+            // input_1 - right
+            T1->resize({dim0});
+
+            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()).empty());
+
+            // input_1 - wrong
+            T1->resize({dim0+1});
+
+            REQUIRE_THROWS(op -> computeOutputDims());
+        }
+        SECTION("1-D / 2-D") {
+            // input_1 - right
+            const std::size_t dim1 = dist(gen);
+            T1->resize({dim0,dim1});
+
+            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim1}));
+
+            // input_1 - wrong
+            T1->resize({dim0+1,dim1});
+
+            REQUIRE_THROWS(op -> computeOutputDims());
+        }
+        SECTION("1-D / +2-D") {
+            // input_1 - right
+            const std::size_t dim1 = dist(gen);
+            const std::size_t dim2 = dist(gen);
+            const std::size_t dim3 = dist(gen);
+            T1->resize({dim1,dim2,dim0,dim3});
+
+            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim1,dim2,dim3}));
+        }
+    }
+    SECTION("2-D / N-D") {
+        // input_0
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        const std::size_t dim0 = dist(gen);
+        const std::size_t dim1 = dist(gen);
+        T0->resize({dim0,dim1});
+        op -> associateInput(0,T0);
+
+        // input_1
+        std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+        op -> associateInput(1,T1);
+
+        SECTION("2-D / 1-D") {
+            // input_1 - right
+            T1->resize({dim1});
+
+            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0}));
+
+            // input_1 - wrong
+            T1->resize({dim1+1});
+
+            REQUIRE_THROWS(op -> computeOutputDims());
+        }
+        SECTION("2-D / 2-D") {
+            // input_1 - right
+            const std::size_t dim2 = dist(gen);
+            T1->resize({dim1, dim2});
+
+            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim2}));
+
+            // input_1 - wrong
+            T1->resize({dim1+1,dim2});
+
+            REQUIRE_THROWS(op -> computeOutputDims());
+        }
+        SECTION("2-D / +2-D") {
+            // input_1 - right
+            const std::size_t dim2 = dist(gen);
+            const std::size_t dim3 = dist(gen);
+            const std::size_t dim4 = dist(gen);
+            T1->resize({dim3,dim4,dim1, dim2});
+
+            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim3,dim4,dim0,dim2}));
+
+            // input_1 - wrong
+            T1->resize({dim3,dim4,dim1+1,dim2});
+
+            REQUIRE_THROWS(op -> computeOutputDims());
+        }
+    }
+    SECTION("+2-D / +2-D") {
+        // input_0
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        const std::size_t dim0 = dist(gen) + 1;
+        const std::size_t dim1 = 1;
+        const std::size_t dim2 = dist(gen);
+        const std::size_t dim3 = dist(gen);
+        T0->resize({dim0,dim1,dim2,dim3});
+        op -> associateInput(0,T0);
+
+        // input_1
+        std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+        op -> associateInput(1,T1);
+
+        // input_1 - right
+        // 1
+        const std::size_t dim5 = dist(gen);
+        T1->resize({dim0,dim1,dim3,dim5});
+        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1,dim2,dim5}));
+
+        // 2 - input_1 broadcast
+        T1->resize({1,dim1,dim3,dim5});
+        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1,dim2,dim5}));
+
+        // 3 - input_0 broadcast
+        const std::size_t dim1_bigger = dist(gen) + 1;
+        T1->resize({dim0,dim1_bigger,dim3,dim5});
+        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1_bigger,dim2,dim5}));
+
+        // 4 - input_0+input_1 broadcast
+        T1->resize({1,dim1_bigger,dim3,dim5});
+        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1_bigger,dim2,dim5}));
+
+        // input_1 - wrong
+        T1->resize({dim0+1,dim1,dim3,dim5});
+        REQUIRE_THROWS(op -> computeOutputDims());
+    }
+}
+} // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/recipies/Test_FuseMulAdd.cpp b/unit_tests/recipies/Test_FuseMulAdd.cpp
index 968826230dfdf85290ee377aee155e06855c4b28..d0875fe10078eb9d8e3a97e0703191b5697f3fda 100644
--- a/unit_tests/recipies/Test_FuseMulAdd.cpp
+++ b/unit_tests/recipies/Test_FuseMulAdd.cpp
@@ -25,9 +25,9 @@ namespace Aidge {
 
 TEST_CASE("[cpu/recipies] FuseMulAdd", "[FuseMulAdd][recipies]") {
     // generate the original GraphView
-    auto matmul0 = MatMul(5, 5, "matmul0");
+    auto matmul0 = MatMul("matmul0");
     auto add0 = Add(2, "add0");
-    auto matmul1 = MatMul(5, 5, "matmul1");
+    auto matmul1 = MatMul("matmul1");
     auto add1 = Add(2, "add1");
 
     auto b0 = Producer({5}, "B0");
@@ -49,7 +49,7 @@ TEST_CASE("[cpu/recipies] FuseMulAdd", "[FuseMulAdd][recipies]") {
     b1->addChild(add1, 0, 1);
 
     auto g = std::make_shared<GraphView>();
-    g->add({matmul0, add0, matmul1, add1, b0, b1});
+    g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1});
 
     // Check original graph
     REQUIRE(g->getNodes() ==
diff --git a/version.txt b/version.txt
index 8a9ecc2ea99d607e92feae1656ddbf6fdd82a2c1..17e51c385ea382d4f2ef124b7032c1604845622d 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.0.1
\ No newline at end of file
+0.1.1