diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index 687698e205afc36c06578fb25d6a714eb3963a66..82ecc7d28b723d2b3e268f4fb6fbf20d595240ff 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -134,6 +134,21 @@ void explicitTranspose(std::shared_ptr<GraphView> graphView);
 */
 void expandMetaOps(std::shared_ptr<GraphView> graph, bool recursive = false);
 
+/**
+ * @brief Tile any :cpp:function:`Aidge::MatMul` operator into several fixed-size matrix multiplications.
+ * For instance, for a MatMul of size 80x80 and a tiling of 16x16, this will tile
+ * the MatMul operator into 25 (5 by 5) MatMul operators of size 16x16, with Slice
+ * operators inserted at the inputs and Concat operators inserted at the outputs.
+ * 
+ * This is especially useful when a matrix multiplication must be mapped to hardware
+ * with a fixed maximum size, such as a TPU (Tensor Processing Unit) or an MMA
+ * (Matrix Multiplication Accelerator). This recipe can be combined with the
+ * :cpp:function:`Aidge::convToMatMul` recipe to convert convolutions to matrix
+ * multiplications beforehand, and with the :cpp:function:`Aidge::constantFolding`
+ * recipe to fold sliced constant tensors.
+ * 
+ * @param matMul MatMul operator to be tiled.
+ * @param maxDims Maximum output dimensions of the tiled MatMul operators.
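+ *
+ * A minimal usage sketch (illustrative; it assumes a GraphView `graph` whose tensor
+ * dimensions have already been forwarded):
+ * @code{.cpp}
+ * for (const auto& node : graph->getNodes()) {
+ *     if (node->type() == "MatMul") {
+ *         // Tile this MatMul so that each resulting MatMul output is at most 16x16.
+ *         Aidge::matMulTiling(node, {16, 16});
+ *     }
+ * }
+ * @endcode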
+ */
 void matMulTiling(NodePtr matMul, const std::vector<DimSize_t>& maxDims);
 
 /**