diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index ee4d9cdcb6638c15ecffcb5d86de00fca62046e1..3dbf54a5fa58be40b08f58d760f3991586203825 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -569,7 +569,7 @@ public:
      * @note If Tensor instance and implementation already existed for the gradient
      * nothing is done.
      */
-    void initGradient() {
+    void initGrad() {
         if (!mGrad) {
             mGrad = std::make_shared<Tensor>(mDims);
         }
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index a21aa8be5a68ef32a4735eacb5701670a2d6a56c..1e4974c1c87c173b3056709b793ce9e0b78f35fd 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -1,4 +1,4 @@
-/********************************************************************************
+/********************************************************************************
  * Copyright (c) 2023 CEA-List
  *
  * This program and the accompanying materials are made available under the
@@ -79,7 +79,7 @@ void init_Tensor(py::module& m){
     .def("grad", &Tensor::grad)
     .def("set_grad", &Tensor::setGrad)
     .def("dtype", &Tensor::dataType)
-    .def("init_gradient", &Tensor::initGradient)
+    .def("init_grad", &Tensor::initGrad)
     .def("size", &Tensor::size)
     .def("resize", (void (Tensor::*)(const std::vector<DimSize_t>&, std::vector<DimSize_t>)) &Tensor::resize)
     .def("has_impl", &Tensor::hasImpl)
diff --git a/src/recipes/GraphViewHelper.cpp b/src/recipes/GraphViewHelper.cpp
index 3b42db7fe18d2269b95cf35fd92851d1e3684bad..b0c99bffb895dc64b20d76991911ae5f4b604c85 100644
--- a/src/recipes/GraphViewHelper.cpp
+++ b/src/recipes/GraphViewHelper.cpp
@@ -51,7 +51,7 @@ void Aidge::compile_gradient(std::shared_ptr<Aidge::GraphView> gv) {
         AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Cannot instanciate gradient of an Operator ({}) that doesn't use Tensor.", node->getOperator()->type());
         const std::shared_ptr<OperatorTensor> op = std::dynamic_pointer_cast<OperatorTensor>(node -> getOperator());
         for (std::size_t o = 0; o < node -> nbOutputs(); ++o) {
-            op->getOutput(o)->initGradient();
+            op->getOutput(o)->initGrad();
         }
     }
-}
\ No newline at end of file
+}