diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 6dcec5aaa4fa80aefebd538a1728445051ca080e..7a81503c967adce3ee000c36ee2f509901cda9ec 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -18,6 +18,8 @@
 #include <string>
 #include <vector>
 
+#include "aidge/data/Tensor.hpp"
+
 namespace Aidge {
 class Node;
 class GraphView;
@@ -49,11 +51,17 @@ public:
         mScheduling.clear();
         mStaticSchedule.clear();
     }
+    /**
+     * @brief Bind the given data tensors to the data inputs of the graphView. In case of multiple data input tensors, they are mapped to the graph inputs in the order given by the graph.
+     * 
+     * @param data data input tensors
+     */
+    void connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data);
 
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
-    void forward(bool forwardDims = true, bool verbose = false);
+    void forward(bool forwardDims = true, bool verbose = false, std::vector<std::shared_ptr<Aidge::Tensor>> data = {});
 
     /**
      * @brief Save in a Markdown file the order of layers execution.
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index d963b81d501f5cd2faf4f69810c897bb4b4da86d..4eb715e799158a1ead143430f574f98059662666 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -13,13 +13,14 @@
 #include <pybind11/stl.h>
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/graph/GraphView.hpp"
+#include "aidge/data/Tensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 void init_Scheduler(py::module& m){
     py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>>(m, "SequentialScheduler")
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
-    .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false)
+    .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false, py::arg("data")=std::vector<std::shared_ptr<Tensor>>())
     .def("save_scheduling_diagram", &SequentialScheduler::saveSchedulingDiagram, py::arg("file_name"))
     .def("resetScheduling", &SequentialScheduler::resetScheduling)
     .def("generate_scheduling", &SequentialScheduler::generateScheduling, py::arg("verbose")=false)
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 3afbcd0442fd40214687751d50bfc98809bba840..380ff8bf3ebabc1a7f7bf7c6f53d05fe99ab30dd 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -174,8 +174,28 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
 
 }
 
+void Aidge::SequentialScheduler::connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data){
+    // This version of connectInputs only binds the given tensors to the graph's ordered data inputs.
+    auto inputNodes = mGraphView->getOrderedInputs();
+
+    // Assert that the number of input data producers corresponds to the number of data input
+    assert(data.size() == inputNodes.size() && "Scheduler::connectInputs error - Inconsistent number of graph inputs and inputs passed to the graph");
+    
+    for (std::size_t i = 0; i < data.size(); ++i){
+        // TODO : maybe shallow copy instead of deepcopy
+        inputNodes[i].first->getOperator()->setInput(inputNodes[i].second, data[i]);
+    }
+}
+
+
 // TODO: handle multiple inputs/outputs
-void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
+void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::vector<std::shared_ptr<Aidge::Tensor>> data) {
+    
+    // Collect all data input of the graph (that are producers)
+    if (!data.empty()){
+        connectInputs(data);
+    }
+
     // Forward dims (if allowed)
     if (forwardDims) {mGraphView->forwardDims(); }