diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index ead6c19fa5fe1e91ec1c24cf8dfee6146390477f..3dbf54a5fa58be40b08f58d760f3991586203825 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -554,16 +554,15 @@ public:
     inline void print() const { fmt::print("{}\n", toString()); }
 
     std::shared_ptr<Tensor> grad() {
-        // if (!mGrad && mImpl) {
-        //     mGrad = std::make_shared<Tensor>(mDims);
-        //     mGrad->setDataType(mDataType);
-        //     mGrad->setBackend(mImpl->backend());
-
-        //     // if (mImpl) mGrad->setBackend(mImpl->backend());
-        // }
-
         return mGrad;
     }
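+    /**
+     * @brief Set the gradient Tensor associated with this Tensor.
+     * @note The given Tensor is stored as-is; no copy, resize or backend change is performed.
+     */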
+    void setGrad(std::shared_ptr<Tensor> newGrad) {
+        mGrad = newGrad;
+    }
 
     /**
      * @brief Associate the gradient with a Tensor instance and set its implementation
@@ -574,7 +573,7 @@
      * @note If Tensor instance and implementation already existed for the gradient
      * nothing is done.
      */
-    void initGradient() {
+    void initGrad() {
         if (!mGrad) {
             mGrad = std::make_shared<Tensor>(mDims);
         }
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 7e9072857dae8fa3137065e5c47cc11d88d37efe..23825079673129ea08aa7da40b21a8cc921d6ba0 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -105,7 +105,7 @@ public:
     void forward() override final;
 
     void backward() override final {
-        fmt::print("Basic Producer backward() function.\n");
+        // fmt::print("Basic Producer backward() function.\n");
     }
     void setOutput(const Aidge::IOIndex_t outputIdx, std::shared_ptr<Aidge::Data>&& data) override {
         if (getAttr<ProdAttr::Constant>()) {
diff --git a/include/aidge/scheduler/SequentialScheduler.hpp b/include/aidge/scheduler/SequentialScheduler.hpp
index 7201601254b779d64f23e9c0d1d00f5c6c23532a..a7929fde8a2affdd562d70d11a7c809aaf3357d0 100644
--- a/include/aidge/scheduler/SequentialScheduler.hpp
+++ b/include/aidge/scheduler/SequentialScheduler.hpp
@@ -54,7 +54,8 @@ public:
     /**
-     * @brief Run the provided Computational Graph with a batch of data
+     * @brief Run the backward pass on the provided Computational Graph.
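+     * @param instantiateGrad if true, create and initialize the output gradients (via compile_gradient) before running the backward pass.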
      */
-    void backward(std::vector<std::shared_ptr<Aidge::Tensor>> data, bool instantiateGrad = true);
+    void backward(bool instantiateGrad = true);
 
 private:
     SchedulingPolicy mSchedulingPolicy;
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index b97af94ad583cf42e25fa3afc0697021f6dcadcc..3c2120565e1637697e5258723b1b366a520fdf80 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -77,7 +77,9 @@ void init_Tensor(py::module& m){
     .def("set_backend", &Tensor::setBackend, py::arg("name"), py::arg("device") = 0, py::arg("copyFrom") = true)
     .def("dims", (const std::vector<DimSize_t>& (Tensor::*)()const) &Tensor::dims)
     .def("grad", &Tensor::grad)
+    .def("set_grad", &Tensor::setGrad)
     .def("dtype", &Tensor::dataType)
+    .def("init_grad", &Tensor::initGrad)
     .def("size", &Tensor::size)
     .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize)
     .def("has_impl", &Tensor::hasImpl)
diff --git a/python_binding/recipes/pybind_GraphViewHelper.cpp b/python_binding/recipes/pybind_GraphViewHelper.cpp
index ac56fb4b43eb5b0a737157ec9e64c6771a692816..e65b790d3eba6072e3e1b112c7d841959d4a5672 100644
--- a/python_binding/recipes/pybind_GraphViewHelper.cpp
+++ b/python_binding/recipes/pybind_GraphViewHelper.cpp
@@ -24,5 +24,6 @@ namespace py = pybind11;
 namespace Aidge {
 void init_GraphViewHelper(py::module &m) {
     m.def("producers", &producers, py::arg("graphview"));
+    m.def("compile_gradient", &compile_gradient, py::arg("graphview"));
 }
 } // namespace Aidge
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index 3f763c8ff0717fb07c1b6c1f85b6aba06c1dc8f1..b16134da324383a4542965393257288c49dceed0 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -34,7 +34,7 @@ void init_Scheduler(py::module& m){
     py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>, Scheduler>(m, "SequentialScheduler")
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
     .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("data")=std::vector<Tensor>())
-    .def("backward", &SequentialScheduler::backward, py::arg("data"), py::arg("instanciate_grad")=true)
+    .def("backward", &SequentialScheduler::backward, py::arg("instanciate_grad")=true)
     ;
 
     py::class_<ParallelScheduler, std::shared_ptr<ParallelScheduler>, Scheduler>(m, "ParallelScheduler")
diff --git a/src/filler/HeFiller.cpp b/src/filler/HeFiller.cpp
index 74d681f1a05c15045d27a0fe678aa676d16af077..ff20b76183c03e7ac90b5c225b3da7a8c6ffb2df 100644
--- a/src/filler/HeFiller.cpp
+++ b/src/filler/HeFiller.cpp
@@ -29,7 +29,9 @@ void Aidge::heFiller(std::shared_ptr<Aidge::Tensor> tensor,
               : (varianceNorm == Aidge::VarianceNorm::Average)
                   ? (fanIn + fanOut) / 2.0
                   : fanOut);
-
+    AIDGE_ASSERT(n > 0,
+                 "Invalid fan-in/fan-out: the normalization value must be "
+                 "positive to avoid a division by zero or a negative square root.");
     const T stdDev(std::sqrt(2.0 / n));
 
     const T mean(varianceNorm == Aidge::VarianceNorm::FanIn ? meanNorm / fanIn
diff --git a/src/filler/XavierFiller.cpp b/src/filler/XavierFiller.cpp
index a1de15971ca8063e504e270fa6d2275d93270460..734874d449c83087ca0e93df7eeb620e178ee7ba 100644
--- a/src/filler/XavierFiller.cpp
+++ b/src/filler/XavierFiller.cpp
@@ -29,6 +29,9 @@ void Aidge::xavierUniformFiller(std::shared_ptr<Aidge::Tensor> tensor,
               : (varianceNorm == Aidge::VarianceNorm::Average)
                   ? (fanIn + fanOut) / 2.0
                   : fanOut);
+    AIDGE_ASSERT(n > 0,
+                 "Invalid fan-in/fan-out: the normalization value must be "
+                 "positive to avoid a division by zero or a negative square root.");
     const T scale(std::sqrt(3.0 / n));
 
     std::uniform_real_distribution<T> uniformDist(-scale, scale);
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index f384c10138500f454720395e7387c331d67440b6..7059ea7e989d789b4cff0ed895fc2c5ec0ad81bc 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -72,9 +72,6 @@ void Aidge::Producer_Op::forward() {
     if (!backend().empty()) {
         mImpl->forward();
     }
-    else {
-        fmt::print("Basic Producer forward() function.\n");
-    }
 
     runHooks();
 }
diff --git a/src/recipes/GraphViewHelper.cpp b/src/recipes/GraphViewHelper.cpp
index 3b42db7fe18d2269b95cf35fd92851d1e3684bad..b0c99bffb895dc64b20d76991911ae5f4b604c85 100644
--- a/src/recipes/GraphViewHelper.cpp
+++ b/src/recipes/GraphViewHelper.cpp
@@ -51,7 +51,7 @@ void Aidge::compile_gradient(std::shared_ptr<Aidge::GraphView> gv) {
         AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Cannot instanciate gradient of an Operator ({}) that doesn't use Tensor.", node->getOperator()->type());
         const std::shared_ptr<OperatorTensor> op = std::dynamic_pointer_cast<OperatorTensor>(node -> getOperator());
         for (std::size_t o = 0; o < node -> nbOutputs(); ++o) {
-            op->getOutput(o)->initGradient();
+            op->getOutput(o)->initGrad();
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/scheduler/SequentialScheduler.cpp b/src/scheduler/SequentialScheduler.cpp
index f044603fb8b1316ec71728acec520204bb5361b8..74b1b3f0c6e9be164792460669821744661c15b3 100644
--- a/src/scheduler/SequentialScheduler.cpp
+++ b/src/scheduler/SequentialScheduler.cpp
@@ -73,21 +73,14 @@ void Aidge::SequentialScheduler::forward(bool forwardDims, const std::vector<std
     }
 }
 
-void Aidge::SequentialScheduler::backward(std::vector<std::shared_ptr<Aidge::Tensor>> data, bool instanciateGrad) {
+void Aidge::SequentialScheduler::backward(bool instanciateGrad) {
-    // create ad set Grad values
+    // create and set Grad values
     if (instanciateGrad) { compile_gradient(mGraphView); }
 
-    const auto& ordered_outputs = mGraphView->getOrderedOutputs();
-    AIDGE_ASSERT(ordered_outputs.size() == data.size(), "You must provide the \
-                   right number of data objects to run the backward function. \
-                   {} outputs detected for the current GraphView when {} were \
-                   provided.", ordered_outputs.size(), data.size());
-    for (std::size_t i = 0; i < ordered_outputs.size(); ++i) {
-        const std::shared_ptr<OperatorTensor> op_ = std::dynamic_pointer_cast<OperatorTensor>(ordered_outputs[i].first->getOperator());
-        const std::shared_ptr<Tensor> t_grad = op_->getOutput(ordered_outputs[i].second)->grad();
-        AIDGE_ASSERT(data[i]->dims() == t_grad->dims(), "Wrong gradient size.");
-        *t_grad = data[i]->clone();
-    }
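+    // The output gradients are expected to have been set by the caller
+    // (e.g. from a loss computation) before calling backward().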
+    // TODO: check that the output gradients are not empty
+
     // Generate scheduling *only if empty*
     // If scheduling was already generated (in one or several steps, i.e. one or
     // several successive call to generateScheduling()), do not generate it twice