diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index c5b027e70f2153d106fbaccef166d85dbe1efe1f..9e0e457b49fe40b2a6e9e3ce5c5e4b77bee1d93e 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -14,13 +14,15 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/backend/TensorImpl.hpp"
+#include "aidge/backend/StimulusImpl.hpp"
 
 #include "aidge/backend/cpu/data/TensorImpl.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
-
+#include "aidge/data/Database.hpp"
+#include "aidge/data/DataProvider.hpp"
 #include "aidge/graph/Connector.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
@@ -61,6 +63,7 @@
 #include "aidge/operator/Sub.hpp"
 #include "aidge/operator/Transpose.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
+#include "aidge/stimuli/Stimulus.hpp"
 
 #include "aidge/recipies/Recipies.hpp"
 
@@ -69,7 +72,5 @@
 #include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-//#include "aidge/utilsParsing/AstNode.hpp"
-//#include "aidge/utilsParsing/ParsingToken.hpp"
 
 #endif /* AIDGE_IMPORTS_H_ */
diff --git a/include/aidge/backend/StimulusImpl.hpp b/include/aidge/backend/StimulusImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..fbdf57b1587d76160c0cb146b6fe9da6947541dc
--- /dev/null
+++ b/include/aidge/backend/StimulusImpl.hpp
@@ -0,0 +1,32 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_STIMULUSIMPL_H_
+#define AIDGE_CORE_BACKEND_STIMULUSIMPL_H_
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Base class to implement data loading functions.
+ */
+class StimulusImpl {
+public:
+    virtual ~StimulusImpl() noexcept = default;
+
+    virtual std::shared_ptr<Tensor> load() const = 0;
+};
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_BACKEND_STIMULUSIMPL_H_ */
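
A minimal sketch of a concrete implementation, assuming a hypothetical in-memory loader; the class name and tensor shape are illustrative, not part of this patch:

    #include <memory>
    #include "aidge/backend/StimulusImpl.hpp"
    #include "aidge/backend/cpu/data/TensorImpl.hpp"  // registers the cpu Tensor implementation
    #include "aidge/data/Tensor.hpp"

    namespace Aidge {
    // Hypothetical implementation that materializes a fixed-size sample.
    class StimulusImpl_demo : public StimulusImpl {
    public:
        std::shared_ptr<Tensor> load() const override {
            // Allocate an uninitialized 1x3 float tensor backed by cpu storage.
            auto tensor = std::make_shared<Tensor>(std::vector<DimSize_t>{1, 3});
            tensor->setBackend("cpu");
            return tensor;
        }
    };
    } // namespace Aidge
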
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 62f13acb3db81954a4fbb753a3e68e1c5a516402..8539c8e36da86e7d15943f2a72826313858751ea 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -67,7 +67,10 @@ private:
 class TensorImpl {
 public:
     TensorImpl() = delete;
-    TensorImpl(const char *backend, DeviceIdx_t device, NbElts_t length) : mBackend(backend), mDevice(device), mNbElts(length) {};
+    TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims) : mBackend(backend), mDevice(device)
+    {
+        resize(dims);
+    }
 
     /**
      * Return the (backend, device) pair for this implementation.
@@ -147,8 +150,12 @@ public:
     /**
-     * Set the size, in number of elements, that must be stored.
+     * Set the dimensions to be stored; the total number of elements is computed from them.
     */
-    void resize(NbElts_t length) {
-        mNbElts = length;
+    virtual void resize(std::vector<DimSize_t> dims) {
+        size_t product = 1;
+        for (size_t num : dims) {
+            product *= num;
+        }
+        mNbElts = product;
     }
 
     /**
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 46dfae3d53b4b201507290bd538ea13737919c3e..a1fbfa336ae2148f687d5181a77e0dafa7466cf2 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -33,7 +33,7 @@ private:
 public:
     static constexpr const char *Backend = "cpu";
 
-    TensorImpl_cpu(DeviceIdx_t device, NbElts_t length) : TensorImpl(Backend, device, length) {}
+    TensorImpl_cpu(DeviceIdx_t device, std::vector<DimSize_t> dims) : TensorImpl(Backend, device, dims) {}
 
     bool operator==(const TensorImpl &otherImpl) const override final {
         const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
@@ -47,8 +47,8 @@ public:
         return i == mNbElts;
     }
 
-    static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, NbElts_t length) {
-        return std::make_shared<TensorImpl_cpu<T>>(device, length);
+    static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, std::vector<DimSize_t> dims) {
+        return std::make_shared<TensorImpl_cpu<T>>(device, dims);
     }
 
     inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
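
Backend implementations are now created from the tensor's full dims rather than a flat length. A minimal creation sketch through the registrar (a free-standing fragment, assuming the cpu backend header above is included so the implementation is registered):

    // Create a cpu float implementation for a 2x3 tensor; the base-class
    // resize() derives the element count (here 6) from the dims.
    std::shared_ptr<Aidge::TensorImpl> impl =
        Aidge::Registrar<Aidge::Tensor>::create({"cpu", Aidge::DataType::Float32})(0, {2, 3});
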
diff --git a/include/aidge/data/DataProvider.hpp b/include/aidge/data/DataProvider.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..5c7a1c73ce4ad4eb512a446879cb1ad9b673eb2f
--- /dev/null
+++ b/include/aidge/data/DataProvider.hpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_DATA_DATAPROVIDER_H_
+#define AIDGE_CORE_DATA_DATAPROVIDER_H_
+
+#include <cstddef>  // std::size_t
+#include <memory>   // std::shared_ptr
+#include <string>
+#include <vector>   // std::vector
+
+#include "aidge/data/Database.hpp"
+#include "aidge/data/Data.hpp"
+
+
+
+namespace Aidge {
+
+/**
+ * @brief Data Provider. Takes a database and composes batches by fetching data from it.
+ * @todo Implement a drop-last-batch option; currently the last batch is returned with fewer elements.
+ * @todo Implement readRandomBatch to compose batches with a random sampling strategy, which is necessary for training.
+ */
+class DataProvider {
+private:
+    // Dataset providing the data to the dataProvider
+    const Database& mDatabase;
+
+    const std::size_t mNumberModality;
+    std::vector<std::vector<std::size_t>> mDataSizes;
+    std::vector<std::string> mDataBackends;
+    std::vector<DataType> mDataTypes;
+
+    // Desired size of the produced batches
+    const std::size_t mBatchSize;
+
+public:
+    /**
+     * @brief Constructor of Data Provider.
+     * @param database database from which to load the data.
+     * @param batchSize number of data samples per batch.
+     */
+    DataProvider(const Database& database, const std::size_t batchSize);
+
+public:
+    /**
+     * @brief Create a batch for each data modality in the database. The returned batches contain the data in the order given by the database.
+     * @param startIndex index of the first database item to include in the batch.
+     * @return a vector of tensors. Each tensor is a batch corresponding to one modality.
+     */
+    std::vector<std::shared_ptr<Tensor>> readBatch(const std::size_t startIndex) const;
+};
+
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_DATA_DATAPROVIDER_H_ */
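
A usage sketch, assuming MyDataset is a concrete Database subclass (a hypothetical one is sketched after the Database header below) and that samples is a vector of tensors prepared elsewhere:

    #include "aidge/data/DataProvider.hpp"

    MyDataset dataset(samples);                 // hypothetical Database subclass
    Aidge::DataProvider provider(dataset, 32);  // batches of 32 samples

    // One tensor per modality, with the batch dimension prepended to the data dims.
    std::vector<std::shared_ptr<Aidge::Tensor>> batch = provider.readBatch(0);
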
diff --git a/include/aidge/data/Database.hpp b/include/aidge/data/Database.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..edd4b4639fb415dfd723aca987ae754f6d5ccc63
--- /dev/null
+++ b/include/aidge/data/Database.hpp
@@ -0,0 +1,57 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_DATA_DATABASE_H_
+#define AIDGE_CORE_DATA_DATABASE_H_
+
+#include <cstddef>
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Abstract class representing a map from a key to data.
+ * All databases should inherit from this class. All subclasses should override
+ * :cpp:function:`Database::getItem` to fetch data at a given index.
+ */
+class Database {
+public:
+    Database() = default;
+    virtual ~Database() noexcept = default;
+
+    /**
+     * @brief Fetch an item of the database.
+     * @param index index of the item.
+     * @return vector of data mapped to index.
+     */
+    virtual std::vector<std::shared_ptr<Tensor>> getItem(const std::size_t index) const = 0;
+
+    /**
+     * @brief Get the number of items in the database
+     *
+     * @return std::size_t
+     */
+    virtual std::size_t getLen() const noexcept = 0;
+
+    /**
+     * @brief Get the number of modalities in one database item
+     *
+     * @return std::size_t
+     */
+    virtual std::size_t getNbModalities() const noexcept = 0;
+
+};
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_DATA_DATABASE_H_ */
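
A minimal sketch of a concrete subclass, assuming a single-modality dataset whose samples already live in memory (all names hypothetical):

    #include <cstddef>
    #include <memory>
    #include <vector>

    #include "aidge/data/Database.hpp"
    #include "aidge/data/Tensor.hpp"

    class MyDataset : public Aidge::Database {
    public:
        explicit MyDataset(std::vector<std::shared_ptr<Aidge::Tensor>> samples)
            : mSamples(std::move(samples)) {}

        // Each item carries a single modality here.
        std::vector<std::shared_ptr<Aidge::Tensor>> getItem(const std::size_t index) const override {
            return {mSamples[index]};
        }
        std::size_t getLen() const noexcept override { return mSamples.size(); }
        std::size_t getNbModalities() const noexcept override { return 1; }

    private:
        std::vector<std::shared_ptr<Aidge::Tensor>> mSamples;
    };
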
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 658c0b497d9753f1bdfd42a274dbb48970cb6d6b..978a850466a09aec0c36d63cbdc819d2a12da200 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -32,7 +32,7 @@ namespace Aidge {
  * Contains a pointer to an actual contiguous implementation of data.
  */
 class Tensor : public Data,
-               public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)> {
+               public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> {
    private:
     DataType mDataType; /** enum to specify data type. */
     std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */
@@ -59,11 +59,25 @@ class Tensor : public Data,
         // ctor
     }
 
+    /**
+     * @brief Construct a new Tensor object from dimensions.
+     *
+     * @param dims dimensions of the tensor
+     * @param dataType datatype of the tensor (default = DataType::Float32)
+     */
+    Tensor(const std::vector<DimSize_t>& dims, DataType dataType = DataType::Float32)
+        : Data(Type),
+          mDataType(dataType),
+          mDims(dims)
+    {
+        computeSize();
+    }
+
     /**
      * @brief Construct a new Tensor object from another one (shallow copy).
      * Data memory is not copied, but shared between the new Tensor and the
      * initial one.
-     * 
+     *
      * @param otherTensor
      */
     Tensor(const Tensor&)            = default;
@@ -78,7 +92,7 @@ class Tensor : public Data,
             newTensor.makeContiguous();
         }
         else {
-            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mSize);
+            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
             newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
             newTensor.setImpl(newImpl);
         }
@@ -96,7 +110,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0}),
           mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
           mSize(SIZE_0) {
         mImpl->copyFromHost(&arr.data[0], SIZE_0);
     }
@@ -105,7 +119,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
         resize({SIZE_0});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0});
         }
         mImpl->copyFromHost(&arr.data[0], SIZE_0, mImplOffset);
         return *this;
@@ -123,7 +137,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0, SIZE_1}),
           mStrides({SIZE_1, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1})),
           mSize(SIZE_0 * SIZE_1) {
         mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
     }
@@ -132,7 +146,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
         resize({SIZE_0, SIZE_1});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1});
         }
         mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1, mImplOffset);
         return *this;
@@ -151,7 +165,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0, SIZE_1, SIZE_2}),
           mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2})),
           mSize(SIZE_0 * SIZE_1 * SIZE_2) {
         mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
     }
@@ -160,7 +174,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
         resize({SIZE_0, SIZE_1, SIZE_2});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2});
         }
         mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2, mImplOffset);
         return *this;
@@ -180,7 +194,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
           mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
           mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3) {
         mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
     }
@@ -189,7 +203,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
         resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3});
         }
         mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3, mImplOffset);
         return *this;
@@ -250,7 +264,7 @@ class Tensor : public Data,
             if (mImpl->device() != std::make_pair(name, device)) {
                 // Backend change: create new impl, copy from old to new and replace
                 // impl
-                std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mImpl->size());
+                std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
                 if (copyFrom) {
                     newImpl->copyFrom(*mImpl, mImpl->size(), mImplOffset, 0);
                 }
@@ -258,7 +272,7 @@ class Tensor : public Data,
             }
         }
         else {
-            mImpl = Registrar<Tensor>::create({name, mDataType})(device, mSize);
+            mImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
         }
     }
 
@@ -288,7 +302,7 @@ class Tensor : public Data,
      */
     void setDataType(const DataType dt, bool copyCast = true) {
         if (mImpl && (dataType() != dt)) {
-            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(mImpl->device().second, mImpl->size());
+            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(mImpl->device().second, mDims);
             if (copyCast) {
                 newImpl->copyCast(mImpl->rawPtr(mImplOffset), mDataType, mImpl->size());
             }
@@ -306,7 +320,7 @@ class Tensor : public Data,
 
     /**
      * @brief Set the Impl object
-     * 
+     *
      * @param impl New impl shared pointer
      * @param implOffset Storage offset in this new impl for this Tensor
      */
@@ -428,7 +442,7 @@ class Tensor : public Data,
 
             computeSize();
             if (mImpl) {
-                mImpl->resize(mSize);
+                mImpl->resize(mDims);
             }
         }
     }
@@ -631,7 +645,7 @@ class Tensor : public Data,
      * tensor is returned.
      * It current tensor was contiguous, the returned tensor is garanteed to be
      * contiguous as well.
-     * 
+     *
      * @param coordIdx Coordinates of the sub-tensor to extract
      * @return Tensor Sub-tensor.
     */
@@ -639,7 +653,7 @@ class Tensor : public Data,
 
     /**
      * Returns a sub-tensor at some coordinate and with some dimension.
-     * 
+     *
      * @param coordIdx First coordinates of the sub-tensor to extract
      * @param dims Dimensions of the sub-tensor to extract
      * @return Tensor Sub-tensor.
@@ -704,7 +718,7 @@ class Tensor : public Data,
      * The data type, backend and device stay the same.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
-     * will occur if fallback has already been allocated with the right 
+     * will occur if fallback has already been allocated with the right
      * type/size/device.
      * @return Reference to either itself or to fallback.
     */
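
With the new dims constructor, a tensor can be shaped before any backend is attached; a short sketch:

    // Uninitialized 2x3x4 tensor, DataType::Float32 by default.
    Aidge::Tensor t({2, 3, 4});
    // Attaching a backend now allocates storage from the stored dims rather than a flat length.
    t.setBackend("cpu");
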
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 813301a144682ba3e99de31ae324ffaedcc5209f..392fb59e65b8b844a091aaa89e7d623986dda85b 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -209,7 +209,7 @@ public:
      * @brief Compute dimensions of input/output Tensors for each Operator of the
      * GraphView object's Nodes.
      */
-    void forwardDims();
+    void forwardDims(const std::vector<std::vector<DimSize_t>> dims = {});
 
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string &backend, DeviceIdx_t device = 0);
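
A sketch of the new overload, assuming graphView is a std::shared_ptr<GraphView> with a single data input taking NCHW batches:

    // Propagate dimensions for a batch of 16 RGB 224x224 images.
    graphView->forwardDims({{16, 3, 224, 224}});
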
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index c966b5f5c1bb4914f3e46f96493da87a6707b1ff..624af6e755d882ca9585ac2e4175f9c3977e4058 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -59,7 +59,7 @@ public:
     // Helper functions that can be used with setComputeOutputDims():
     static const ComputeDimsFunc Identity;
 
-    void setComputeOutputDims(ComputeDimsFunc func) {
+    inline void setComputeOutputDims(ComputeDimsFunc func) {
         mComputeOutputDims = func;
     }
 
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index cebc2d54041bb38c6e7f3434f12b559cec3d80af..808450030bdfc176c9cbc435c76b4932586397b8 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -118,9 +118,7 @@ public:
      * @brief Set the a new OperatorImpl to the Operator
      *
      */
-    void setImpl(std::shared_ptr<OperatorImpl> impl){
-        mImpl = impl;
-    }
+    inline void setImpl(std::shared_ptr<OperatorImpl> impl) { mImpl = impl; }
 
     /**
      * @brief Minimum amount of data from a specific input for one computation pass.
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 6dcec5aaa4fa80aefebd538a1728445051ca080e..7a81503c967adce3ee000c36ee2f509901cda9ec 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -18,6 +18,8 @@
 #include <string>
 #include <vector>
 
+#include "aidge/data/Tensor.hpp"
+
 namespace Aidge {
 class Node;
 class GraphView;
@@ -49,11 +51,17 @@ public:
         mScheduling.clear();
         mStaticSchedule.clear();
     }
+    /**
+     * @brief Connect the data tensors to the data inputs of the GraphView. When the graph has several data inputs, the tensors are mapped to them in the order given by the graph.
+     *
+     * @param data data input tensors
+     */
+    void connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data);
 
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
-    void forward(bool forwardDims = true, bool verbose = false);
+    void forward(bool forwardDims = true, bool verbose = false, std::vector<std::shared_ptr<Aidge::Tensor>> data = {});
 
     /**
      * @brief Save in a Markdown file the order of layers execution.
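
A sketch of the extended forward(), assuming graphView and inputTensor are prepared elsewhere:

    Aidge::SequentialScheduler scheduler(graphView);
    // One tensor per graph data input; dims are then forwarded before execution.
    scheduler.forward(/*forwardDims=*/true, /*verbose=*/false, {inputTensor});
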
diff --git a/include/aidge/stimuli/Stimulus.hpp b/include/aidge/stimuli/Stimulus.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..80e7c76d4857f577f30b90588f4c3998be80bdb8
--- /dev/null
+++ b/include/aidge/stimuli/Stimulus.hpp
@@ -0,0 +1,107 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_STIMULI_STIMULUS_H_
+#define AIDGE_CORE_STIMULI_STIMULUS_H_
+
+#include <string>
+#include <memory>
+#include <tuple>
+
+#include "aidge/backend/StimulusImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+namespace Aidge {
+/**
+ * @brief Stimulus. A class wrapping a data sample. A Stimulus operates in one of two modes: it either loads a data sample from a dataPath, optionally keeping the data in memory once loaded, or it wraps a data sample that is already in memory.
+ * @details In the first mode, the loading function is determined automatically based on the backend and the file extension.
+ */
+class Stimulus : public Registrable<Stimulus, std::tuple<std::string, std::string>, std::unique_ptr<StimulusImpl>(const std::string&)> {
+private:
+    /// Stimulus data path
+    const std::string mDataPath;
+    const std::string mFileExtension;
+    bool mLoadDataInMemory;
+
+    /// Stimulus data ptr
+    std::shared_ptr<Tensor> mData;
+
+    // Implementation of the Stimulus
+    std::unique_ptr<StimulusImpl> mImpl;
+
+public:
+    Stimulus() = delete;
+
+    /**
+     * @brief Construct a new Stimulus object based on a tensor that is already loaded in memory.
+     *
+     * @param data the data tensor.
+     */
+    Stimulus(const std::shared_ptr<Tensor> data)
+    : mLoadDataInMemory(true),
+      mData(data)
+    {
+        // ctor
+    }
+
+    /**
+     * @brief Construct a new Stimulus object based on a dataPath to load the data.
+     *
+     * @param dataPath path to the data to be loaded.
+     * @param loadDataInMemory when true, keep the data in memory once loaded
+     */
+    Stimulus(const std::string& dataPath, bool loadDataInMemory = false)
+    : mDataPath(dataPath),
+      mFileExtension(dataPath.substr(dataPath.find_last_of(".") + 1)),
+      mLoadDataInMemory(loadDataInMemory)
+    {
+        AIDGE_ASSERT((dataPath.find_last_of(".") !=  std::string::npos), "Cannot find extension");
+    }
+
+    /**
+     * @brief Construct a new Stimulus object copied from another one.
+     * @param otherStimulus
+     */
+    Stimulus(const Stimulus& otherStimulus)
+        : mDataPath(otherStimulus.mDataPath),
+          mFileExtension(otherStimulus.mFileExtension),
+          mLoadDataInMemory(otherStimulus.mLoadDataInMemory),
+          mData(otherStimulus.mData)
+    {
+        if (otherStimulus.mImpl) {
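+            // Note: the implementation's backend is not stored, so the copy assumes the "opencv" backend.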
+            mImpl = Registrar<Stimulus>::create({"opencv", mFileExtension})(mDataPath);
+        }
+    }
+
+    virtual ~Stimulus();
+
+public:
+    /**
+     * @brief Set the backend of the load implementation associated with the stimulus.
+     * @details Create and initialize an implementation.
+     * @param name name of the backend.
+     */
+    inline void setBackend(const std::string &name) {
+        mImpl = Registrar<Stimulus>::create({name, mFileExtension})(mDataPath);
+    }
+
+    /**
+     * @brief Get the data tensor associated with the stimulus. The data is either loaded from the dataPath or taken from the in-memory tensor.
+     *
+     * @return std::shared_ptr<Tensor> the data tensor.
+     */
+    virtual std::shared_ptr<Tensor> load();
+};
+} // namespace Aidge
+
+#endif // AIDGE_CORE_STIMULI_STIMULUS_H_
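
A usage sketch, assuming an "opencv" loader is registered for the file extension (such registration lives in a backend module, not in this patch):

    // Lazily load a sample from disk; with loadDataInMemory the result is cached after the first call.
    Aidge::Stimulus stimulus("/path/to/sample.png", /*loadDataInMemory=*/true);
    stimulus.setBackend("opencv");
    std::shared_ptr<Aidge::Tensor> data = stimulus.load();
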
diff --git a/python_binding/data/pybind_DataProvider.cpp b/python_binding/data/pybind_DataProvider.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..dfdf188946673c4e2a7ea2dc0829312758d80f96
--- /dev/null
+++ b/python_binding/data/pybind_DataProvider.cpp
@@ -0,0 +1,22 @@
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include "aidge/data/DataProvider.hpp"
+#include "aidge/data/Database.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_DataProvider(py::module& m){
+
+    py::class_<DataProvider, std::shared_ptr<DataProvider>>(m, "DataProvider")
+          .def(py::init<Database&, std::size_t>(), py::arg("database"), py::arg("batch_size"), py::keep_alive<1, 2>())
+          .def("read_batch", &DataProvider::readBatch, py::arg("start_index"),
+          R"mydelimiter(
+          Return a batch of each data modality.
+
+          :param start_index: Database starting index to read the batch from
+          :type start_index: int
+          )mydelimiter");
+
+}
+}
diff --git a/python_binding/data/pybind_Database.cpp b/python_binding/data/pybind_Database.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..903e692ca3d14d6ae25f0d6f151b1b08d557d924
--- /dev/null
+++ b/python_binding/data/pybind_Database.cpp
@@ -0,0 +1,13 @@
+#include <pybind11/pybind11.h>
+#include "aidge/data/Database.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Database(py::module& m){
+
+    py::class_<Database, std::shared_ptr<Database>>(m,"Database");
+
+
+}
+}
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 9fbf08d0b782b6f39b2bef3d0b3ab918f6789ac0..e07f70eaa7de8dc4daa489ec93c8fd9273559ff2 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -30,7 +30,7 @@ void addCtor(py::class_<Tensor,
                         Data,
                         Registrable<Tensor,
                                     std::tuple<std::string, DataType>,
-                                    std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>>& mTensor){
+                                    std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>>& mTensor){
     mTensor.def(py::init([](
         py::array_t<T, py::array::c_style | py::array::forcecast> b,
         std::string backend = "cpu") {
@@ -60,16 +60,16 @@ void addCtor(py::class_<Tensor,
 void init_Tensor(py::module& m){
     py::class_<Registrable<Tensor,
                            std::tuple<std::string, DataType>,
-                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>,
+                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>,
                std::shared_ptr<Registrable<Tensor,
                                            std::tuple<std::string, DataType>,
-                                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>>>(m,"TensorRegistrable");
+                                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>>>(m,"TensorRegistrable");
 
     py::class_<Tensor, std::shared_ptr<Tensor>,
                Data,
                Registrable<Tensor,
                            std::tuple<std::string, DataType>,
-                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>> pyClassTensor
+                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>> pyClassTensor
         (m,"Tensor", py::multiple_inheritance(), py::buffer_protocol());
 
     pyClassTensor.def(py::init<>())
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index eb26538a5db1eb40fdcb8a2e409067483d4a7d68..c41d99c1a5b034424da06aa9a6c5ba5c6aabbca3 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -100,7 +100,7 @@ void init_GraphView(py::module& m) {
 
           .def("get_nodes", &GraphView::getNodes)
           .def("get_node", &GraphView::getNode, py::arg("node_name"))
-          .def("forward_dims", &GraphView::forwardDims)
+          .def("forward_dims", &GraphView::forwardDims, py::arg("dims")=std::vector<std::vector<DimSize_t>>())
           .def("compile", &GraphView::compile, py::arg("backend"), py::arg("datatype"), py::arg("device") = 0)
           .def("__call__", &GraphView::operator(), py::arg("connectors"))
           .def("set_datatype", &GraphView::setDataType, py::arg("datatype"))
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index e57b06cc5014e7159f5a3e5927aedfefb996cae4..ebf73e85583d3300ce68078dc8236001a4db1c96 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -18,6 +18,8 @@ namespace py = pybind11;
 
 namespace Aidge {
 void init_Data(py::module&);
+void init_Database(py::module&);
+void init_DataProvider(py::module&);
 void init_Tensor(py::module&);
 void init_OperatorImpl(py::module&);
 void init_Attributes(py::module&);
@@ -69,6 +71,8 @@ void init_TensorUtils(py::module&);
 
 void init_Aidge(py::module& m){
     init_Data(m);
+    init_Database(m);
+    init_DataProvider(m);
     init_Tensor(m);
 
     init_Node(m);
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index d963b81d501f5cd2faf4f69810c897bb4b4da86d..4eb715e799158a1ead143430f574f98059662666 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -13,13 +13,14 @@
 #include <pybind11/stl.h>
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/graph/GraphView.hpp"
+#include "aidge/data/Tensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 void init_Scheduler(py::module& m){
     py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>>(m, "SequentialScheduler")
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
-    .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false)
+    .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false, py::arg("data")=std::vector<Tensor>())
     .def("save_scheduling_diagram", &SequentialScheduler::saveSchedulingDiagram, py::arg("file_name"))
     .def("resetScheduling", &SequentialScheduler::resetScheduling)
     .def("generate_scheduling", &SequentialScheduler::generateScheduling, py::arg("verbose")=false)
diff --git a/src/data/DataProvider.cpp b/src/data/DataProvider.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..dffb5745d9e324856548387069bcf1d5ff6a7b48
--- /dev/null
+++ b/src/data/DataProvider.cpp
@@ -0,0 +1,85 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Database.hpp"
+#include "aidge/data/DataProvider.hpp"
+#include "aidge/data/Tensor.hpp"
+
+
+Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::size_t batchSize)
+    : mDatabase(database),
+      mNumberModality(database.getItem(0).size()),
+      mBatchSize(batchSize)
+{
+    // Iterate over each data modality in the database.
+    // Record the tensor dimensions, datatype and backend of each modality so that every fetched item can be checked for consistency.
+    for (const auto& modality : mDatabase.getItem(0)) {
+        mDataSizes.push_back(modality->dims());
+        // assert(std::strcmp(item[i]->getImpl()->backend(), "cpu") == 0 && "DataProvider currently only supports cpu backend tensors");
+        // mDataBackends.push_back(item[i]->getImpl()->backend());
+        mDataTypes.push_back(modality->dataType());
+    }
+}
+
+std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch(const std::size_t startIndex) const
+{
+    assert(startIndex <= mDatabase.getLen() && "DataProvider readBatch: database fetch out of bounds");
+
+
+    // Determine the batch size (may differ for the last batch)
+    const std::size_t current_batch_size = ((startIndex + mBatchSize) > mDatabase.getLen()) ?
+                                            mDatabase.getLen()-startIndex :
+                                            mBatchSize;
+
+    // Create batch tensors (dimensions, backends, datatype) for each modality
+    std::vector<std::shared_ptr<Tensor>> batchTensors;
+    auto dataBatchSize = mDataSizes;
+    for (std::size_t i = 0; i < mNumberModality; ++i) {
+        dataBatchSize[i].insert(dataBatchSize[i].begin(), current_batch_size);
+        auto batchData = std::make_shared<Tensor>();
+        batchData->resize(dataBatchSize[i]);
+        // batchData->setBackend(mDataBackends[i]);
+        batchData->setBackend("cpu");
+        batchData->setDataType(mDataTypes[i]);
+        batchTensors.push_back(batchData);
+    }
+
+    // Fetch each database item and concatenate each data modality into the batch tensors
+    for (std::size_t i = 0; i < current_batch_size; ++i){
+
+        auto dataItem = mDatabase.getItem(startIndex+i);
+        // assert same number of modalities
+        assert(dataItem.size() == mNumberModality && "DataProvider readBatch: item from database has an inconsistent number of modalities.");
+
+        // Browse each modality of the database item
+        for (std::size_t j = 0; j < mNumberModality; ++j) {
+            auto dataSample = dataItem[j];
+
+            // Assert tensor sizes
+            assert(dataSample->dims() == mDataSizes[j] && "DataProvider readBatch: corrupted data size");
+
+            // Assert implementation backend
+            // assert(dataSample->getImpl()->backend() == mDataBackends[j] && "DataProvider readBatch : corrupted data backend");
+
+            // Assert DataType
+            assert(dataSample->dataType() == mDataTypes[j] && "DataProvider readBatch: corrupted data DataType");
+
+            // Concatenate into the batch tensor
+            batchTensors[j]->getImpl()->copy(dataSample->getImpl()->rawPtr(), dataSample->size(), i*dataSample->size());
+        }
+    }
+    return batchTensors;
+}
\ No newline at end of file
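
A sketch of a full pass over the database with the provider from the earlier sketch; the final readBatch call yields a smaller batch when getLen() is not a multiple of the batch size:

    // Walk the database in steps of the batch size (32 here).
    for (std::size_t index = 0; index < dataset.getLen(); index += 32) {
        auto batch = provider.readBatch(index);
        // ... feed 'batch' to a scheduler or a training loop
    }
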
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index d45dee5639a6bc082871e1110657392fb97c15ec..10854153660175a8d30c28b2620e4e99bf460197 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -44,7 +44,7 @@ void Aidge::Tensor::makeContiguous() {
     // Block so that mImpl ref count is 1 for resize()
     {
         // Create a new storage that will be contiguous
-        std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mSize);
+        std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
         // Copy elements from old to new storage
         size_t idx = 0;
         while (idx < mSize) {
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 968e98e75cc587977eb3033fe7f25936880755a4..a93d9af8a972605b1519e9974971ff9e7ad3ef2f 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -265,10 +265,18 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
     forwardDims();
 }
 
-void Aidge::GraphView::forwardDims() {
+void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims) {
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children informations
+    if (!dims.empty()) {
+        AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of dimensions and graph inputs");
+        for (std::size_t i = 0; i < dims.size(); ++i) {
+            auto tensor = std::make_shared<Tensor>(dims[i]);
+            mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
+        }
+    }
+
     for (std::shared_ptr<Node> nodePtr : getNodes()) {
         for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
             // assess if the input was not already set and is a Tensor then link it to parent output
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 3afbcd0442fd40214687751d50bfc98809bba840..380ff8bf3ebabc1a7f7bf7c6f53d05fe99ab30dd 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -174,8 +174,28 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
 
 }
 
+void Aidge::SequentialScheduler::connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data){
+    // This version of connectInputs only connects tensors to the graph's ordered data inputs.
+    auto inputNodes = mGraphView->getOrderedInputs();
+
+    // Assert that the number of tensors provided matches the number of graph data inputs
+    assert(data.size() == inputNodes.size() && "Scheduler connectInputs error - Inconsistent number of graph inputs and inputs passed to the graph");
+
+    for (std::size_t i = 0; i < data.size(); ++i){
+        // TODO : maybe shallow copy instead of deepcopy
+        inputNodes[i].first->getOperator()->setInput(inputNodes[i].second, data[i]);
+    }
+}
+
+
 // TODO: handle multiple inputs/outputs
-void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
+void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::vector<std::shared_ptr<Aidge::Tensor>> data) {
+
+    // Connect the given tensors to the data inputs of the graph
+    if (!data.empty()){
+        connectInputs(data);
+    }
+
     // Forward dims (if allowed)
     if (forwardDims) {mGraphView->forwardDims(); }
 
diff --git a/src/stimuli/Stimulus.cpp b/src/stimuli/Stimulus.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6a91534475f6aaff44d5a2cd4da013434a99f9bf
--- /dev/null
+++ b/src/stimuli/Stimulus.cpp
@@ -0,0 +1,30 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/stimuli/Stimulus.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+
+Aidge::Stimulus::~Stimulus() = default;
+
+std::shared_ptr<Aidge::Tensor> Aidge::Stimulus::load() {
+    AIDGE_ASSERT((mImpl != nullptr || mData != nullptr), "No load implementation and no stored data");
+
+    if (mLoadDataInMemory){
+        if (mData == nullptr){
+            mData = mImpl->load();
+        }
+        return mData;
+    }
+    return mImpl->load();
+}
\ No newline at end of file