diff --git a/.codespellrc b/.codespellrc
new file mode 100644
index 0000000000000000000000000000000000000000..156ac804789e34e3d78a272ad5a27973cb9adce8
--- /dev/null
+++ b/.codespellrc
@@ -0,0 +1,15 @@
+[codespell]
+builtin = clear,rare,en-GB_to_en-US,names,informal,code
+check-filenames =
+check-hidden =
+skip = */.git,*/build*,*/prefix,*/aidge_core,*/aidge_core.egg-info,*/cmake,*/future-std.clang-format
+quiet-level = 2
+# childs : used a lot and understandable
+# dOut,inH,ro : used for testing
+# deque : cpp data struct
+# inout : commented code variable
+# nd : commented code
+# neighbours : exception to the gb to us english rule
+# neighbouring : exception to the gb to us english rule
+# endcode : documentation keyword
+ignore-words-list = childs,dOut,inH,ro,deque,inout,stdio,nd,neighbours,neighbouring,endcode
diff --git a/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.ps1 b/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.ps1
index c2715ea5550432838d3cc8692e97204b278d2c85..eb5658df3b96e3bc234e53d815bde4cecb4ed937 100644
--- a/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.ps1
+++ b/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.ps1
@@ -4,7 +4,7 @@ $ErrorActionPreference = "Stop"
 $AIDGE_DEPENDENCIES = $env:AIDGE_DEPENDENCIES -split ' '
 Write-Host "Aidge dependencies : $AIDGE_DEPENDENCIES"
 if ( $($AIDGE_DEPENDENCIES.Length) -eq 0) {
-        Write-Host "- No dependencies provided for current repsitory"
+        Write-Host "- No dependencies provided for current repository"
         New-Item -ItemType Directory -Force -Path ".\build" | Out-Null
         Remove-Item -Path ".\build\*" -Recurse -Force
     } else {
diff --git a/MANIFEST.in b/MANIFEST.in
index ae5b7c7c2e07eef97ef72bdb79cca94f8124981b..ed911dd75b59b65b8bfa023584aae8585de6325b 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README.md LICENCE
+include README.md LICENSE
 recursive-include aidge_core *.py 
 recursive-exclude aidge_core/unit_tests *.py
 
diff --git a/README.md b/README.md
index fe8fd5a4252054c730be8e948d0d2e415c009d47..5fa6b938c6e333ac2c2292fc931749b3fd953b4f 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ You can find here the C++ code of the Core library of Aidge.
 
 ## Pip installation
 
-To install aidge_core using pip, run the following command in your python environnement :
+To install aidge_core using pip, run the following command in your python environment :
 ``` bash
 pip install . -v
 ```
@@ -23,7 +23,7 @@ pip install . -v
 
 To setup aidge_core using pip in development (or editable mode), use the `--no-build-isolation -e` options to pip.
 
-For instance run the following command in your python environnement for a typical setup :
+For instance run the following command in your python environment for a typical setup :
 ``` bash
 export AIDGE_BUILD_TEST=ON              # enable C++ unit tests
 export AIDGE_PYTHON_BUILD_TYPE=         # default flags (no debug info but fastest build time)
@@ -85,7 +85,7 @@ make all install
 |   Option   | Value type | Description |
 |:----------:|:----------:|:-----------:|
 | *-DCMAKE_INSTALL_PREFIX:PATH* | ``str``  | Path to the install folder |
-| *-DCMAKE_BUILD_TYPE*          | ``str``  | If ``Debug``, compile in debug mode, ``Release`` compile with highest optimisations or "" (empty) , default= ``Release`` |
+| *-DCMAKE_BUILD_TYPE*          | ``str``  | If ``Debug``, compile in debug mode, ``Release`` compile with highest optimizations or "" (empty) , default= ``Release`` |
 | *-DWERROR*                    | ``bool`` | If ``ON`` show warning as error during compilation phase, default=``OFF`` |
 | *-DTEST*                      | ``bool`` | If ``ON`` build C++ unit tests, default=``ON`` |
 | *-DPYBIND*                    | ``bool`` | If ``ON`` activate python binding, default=``OFF`` |
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 649898dd130d5811f65f65af87bc117d3502647c..8a6684b22b4c4659353fa5b5dee2b0820c46a11f 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -140,7 +140,7 @@ public:
      * @brief Get the best adapted meta operator corresponding to the required
      * specifications \p requiredSpecs.
      * The best adaptation is the one with the lowest overhead cost.
-     * Currently, it is the one requiring the least number of additionnal
+     * Currently, it is the one requiring the least number of additional
      * operators to match the available implementations.
      *
      * @param requiredSpecs Required specifications
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 57c6c385d5fdcc9f2439983bd04cc8ece0d8d8f5..864789c19181b52351fc09a63a787feaed31a216 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -144,7 +144,7 @@ public:
 
     /**
      * Return the raw device pointer.
-     * The raw pointer is garanteed to be valid only on the *same* device.
+     * The raw pointer is guaranteed to be valid only on the *same* device.
      * @param offset Offset, in number of elements.
     */
     virtual void* rawPtr(NbElts_t offset = 0) = 0;
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 9390fe5860b5d3523886856d9b2a40752d338af5..d04624fc530a21730cc4dc1f4f1ac90a58e6590b 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -23,7 +23,7 @@ namespace Aidge {
 
 template <class T>
 class TensorImpl_cpu : public TensorImpl {
     static_assert(std::is_trivially_copyable<T>::value, "TensorImpl type should be trivially copyable");
 
 private:
     /// Pointer to the data and its capacity
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 6f877194252c7145ea61e1105e0edb0080409d46..7fa6de63f48561ef0585d5bb2f49b1a583710fb2 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -83,7 +83,7 @@ constexpr std::array<DataFormatTranspose, 7> DataFormatTransposeDict = {{
 /**
  * Get the DataFormatTranspose array to transpose data from src to dst DataFormat.
  * @param src Source DataFormat
- * @param dst Destinatin DataFormat
+ * @param dst Destination DataFormat
  * @return DataFormatTranspose Permutation array to achieve a transposition
  *         from src to dst DataFormat.
 */
diff --git a/include/aidge/data/DataProvider.hpp b/include/aidge/data/DataProvider.hpp
index 6c19b5355e406454a2e20bc8994d0ab04d53576a..ad145ca393e4d88210dbda98ab1cb8b37a2480ba 100644
--- a/include/aidge/data/DataProvider.hpp
+++ b/include/aidge/data/DataProvider.hpp
@@ -25,7 +25,7 @@ namespace Aidge {
 /**
  * @brief Data Provider. Takes in a database and compose batches by fetching data from the given database.
  * @todo Implement Drop last batch option. Currently returns the last batch with less elements in the batch.
- * @todo Implement readRandomBatch to compose batches from the database with a random sampling startegy. Necessary for training.
+ * @todo Implement readRandomBatch to compose batches from the database with a random sampling strategy. Necessary for training.
  */
 class DataProvider {
 private:
diff --git a/include/aidge/data/Interpolation.hpp b/include/aidge/data/Interpolation.hpp
index 2d53ebdd0dd5141acc9a3bce8e906f42f7a557a2..d1af3e831617660356fe48d7d5665564f125c21d 100644
--- a/include/aidge/data/Interpolation.hpp
+++ b/include/aidge/data/Interpolation.hpp
@@ -56,7 +56,7 @@ class Interpolation {
      * @param[in] transformedCoords : coords in interpolated tensor
      * @param[in] inputDims: input dimensions of tensor
      * @param[in] inputDims: output dimensions of tensor
-     * @return std::vector containing coords in orginal tensor reference frame
+     * @return std::vector containing coords in original tensor reference frame
      */
     static std::vector<float> untransformCoordinates(
         const std::vector<DimSize_t> &transformedCoords,
@@ -101,7 +101,7 @@ class Interpolation {
     /*
      * @brief Interpolates values given via input in given mode.
      *
-     * @warning This function is empty and is meant to be overriden in derived
+     * @warning This function is empty and is meant to be overridden in derived
      * class in backend libraries.
      *
      * Values are contiguously arranged in a "square" shape around the point to
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 5c84f52e052e67ca27bfc851f510e522d485e4b7..cfd54e9aa64a0ad6b5165024284b0e3431cab28c 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -76,7 +76,7 @@ class Tensor : public Data,
      * @brief Construct a new Tensor object from an arithmetic parameter.
      *
      * @tparam T Type of the input parameter.
-     * @tparam VT Decayed type of the input paramter.
+     * @tparam VT Decayed type of the input parameter.
      * @param val Input value.
      */
     template<typename T,
@@ -275,7 +275,7 @@ class Tensor : public Data,
     Tensor operator+(const Tensor& other) const;
 
     /**
-     * @brief Element-wise substraction operation for two ``Tensor``s.
+     * @brief Element-wise subtraction operation for two ``Tensor``s.
      * @note ``Tensor``s should be stored on the same backend.
      * @todo If input ``Tensor``s have a different dataType, the output should
      * have the dataType of the ``Tensor`` with the highest precision.
@@ -433,7 +433,7 @@ public:
     }
 
     /**
-     * @brief Return if an implementaiton has been associated.
+     * @brief Return if an implementation has been associated.
      * @return true
      * @return false
      */
@@ -500,8 +500,8 @@ public:
     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
      * If the overall size is not changed (meaning we actually only performed a
-     * reshape), data is garanteed to remain valid.
-     * Otherwise, no garantee is provided regarding the validy of previous data
+     * reshape), data is guaranteed to remain valid.
+     * Otherwise, no guarantee is provided regarding the validity of previous data
      * (unlike std::vector). If the new overall size is larger than the previous
      * one, all previous data is invalided. Otherwise, previous data may or may
      * not remain valid, depending on the backend implementation.
@@ -516,8 +516,8 @@ public:
     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
      * If the overall size is not changed (meaning we actually only performed a
-     * reshape), data is garanteed to remain valid.
-     * Otherwise, no garantee is provided regarding the validy of previous data
+     * reshape), data is guaranteed to remain valid.
+     * Otherwise, no guarantee is provided regarding the validity of previous data
      * (unlike std::vector). If the new overall size is larger than the previous
      * one, all previous data is invalided. Otherwise, previous data may or may
      * not remain valid, depending on the backend implementation.
@@ -715,7 +715,7 @@ public:
      * @note No memory copy is performed, the returned tensor does not own the memory.
      * @note If the number of coordinates matches the number of dimensions, a scalar
      * tensor is returned.
-     * @note If current tensor was contiguous, the returned tensor is garanteed to be
+     * @note If current tensor was contiguous, the returned tensor is guaranteed to be
      * contiguous as well.
      *
      * @param coordIdx Coordinates of the sub-tensor to extract
@@ -726,7 +726,7 @@ public:
     /**
      * @brief Returns a sub-tensor at some coordinate and with some dimension.
      *
-     * @note Data contiguity of the returned Tensor is not guaranted.
+     * @note Data contiguity of the returned Tensor is not guaranteed.
      *
      * @param coordIdx First coordinates of the sub-tensor to extract
      * @param dims Dimensions of the sub-tensor to extract
@@ -793,7 +793,7 @@ public:
     }
 
     /**
-     * Return a reference to a Tensor that is garanteed to be contiguous:
+     * Return a reference to a Tensor that is guaranteed to be contiguous:
      * - itself, if already contiguous;
      * - the provided Tensor, overwritten with the copied data.
      * The data type, backend and device stay the same.
diff --git a/include/aidge/data/half.hpp b/include/aidge/data/half.hpp
index 89df93cf3d10087833b3ad00dfbe3afd4e94c725..1464ac1e092e43059048825bf98d1186314b902c 100644
--- a/include/aidge/data/half.hpp
+++ b/include/aidge/data/half.hpp
@@ -213,11 +213,11 @@
 	#define HALF_ROUND_STYLE	-1			// = std::round_indeterminate
 #endif
 
-/// Tie-breaking behaviour for round to nearest.
+/// Tie-breaking behavior for round to nearest.
 /// This specifies if ties in round to nearest should be resolved by rounding to the nearest even value. By default this is
-/// defined to `0` resulting in the faster but slightly more biased behaviour of rounding away from zero in half-way cases (and
+/// defined to `0` resulting in the faster but slightly more biased behavior of rounding away from zero in half-way cases (and
 /// thus equal to the round() function), but can be redefined to `1` (before including half.hpp) if more IEEE-conformant
-/// behaviour is needed.
+/// behavior is needed.
 #ifndef HALF_ROUND_TIES_TO_EVEN
 	#define HALF_ROUND_TIES_TO_EVEN	0		// ties away from zero
 #endif
@@ -950,7 +950,7 @@ namespace half_float
 		/// Convert half-precision floating point to integer.
 		/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
 		/// \tparam E `true` for round to even, `false` for round away from zero
-		/// \tparam T type to convert to (buitlin integer type with at least 16 bits precision, excluding any implicit sign bits)
+		/// \tparam T type to convert to (builtin integer type with at least 16 bits precision, excluding any implicit sign bits)
 		/// \param value binary representation of half-precision value
 		/// \return integral value
 		template<std::float_round_style R,bool E,typename T> T half2int_impl(uint16 value)
@@ -988,13 +988,13 @@ namespace half_float
 
 		/// Convert half-precision floating point to integer.
 		/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
-		/// \tparam T type to convert to (buitlin integer type with at least 16 bits precision, excluding any implicit sign bits)
+		/// \tparam T type to convert to (builtin integer type with at least 16 bits precision, excluding any implicit sign bits)
 		/// \param value binary representation of half-precision value
 		/// \return integral value
 		template<std::float_round_style R,typename T> T half2int(uint16 value) { return half2int_impl<R,HALF_ROUND_TIES_TO_EVEN,T>(value); }
 
 		/// Convert half-precision floating point to integer using round-to-nearest-away-from-zero.
-		/// \tparam T type to convert to (buitlin integer type with at least 16 bits precision, excluding any implicit sign bits)
+		/// \tparam T type to convert to (builtin integer type with at least 16 bits precision, excluding any implicit sign bits)
 		/// \param value binary representation of half-precision value
 		/// \return integral value
 		template<typename T> T half2int_up(uint16 value) { return half2int_impl<std::round_to_nearest,0,T>(value); }
@@ -1053,16 +1053,16 @@ namespace half_float
 
 	/// Half-precision floating point type.
 	/// This class implements an IEEE-conformant half-precision floating point type with the usual arithmetic operators and
-	/// conversions. It is implicitly convertible to single-precision floating point, which makes artihmetic expressions and
+	/// conversions. It is implicitly convertible to single-precision floating point, which makes arithmetic expressions and
 	/// functions with mixed-type operands to be of the most precise operand type. Additionally all arithmetic operations
 	/// (and many mathematical functions) are carried out in single-precision internally. All conversions from single- to
 	/// half-precision are done using the library's default rounding mode, but temporary results inside chained arithmetic
 	/// expressions are kept in single-precision as long as possible (while of course still maintaining a strong half-precision type).
 	///
 	/// According to the C++98/03 definition, the half type is not a POD type. But according to C++11's less strict and
 	/// extended definitions it is both a standard layout type and a trivially copyable type (even if not a POD type), which
 	/// means it can be standard-conformantly copied using raw binary copies. But in this context some more words about the
-	/// actual size of the type. Although the half is representing an IEEE 16-bit type, it does not neccessarily have to be of
+	/// actual size of the type. Although the half is representing an IEEE 16-bit type, it does not necessarily have to be of
 	/// exactly 16-bits size. But on any reasonable implementation the actual binary representation of this type will most
 	/// probably not ivolve any additional "magic" or padding beyond the simple binary representation of the underlying 16-bit
 	/// IEEE number, even if not strictly guaranteed by the standard. But even then it only has an actual size of 16 bits if
@@ -2155,25 +2155,25 @@ namespace half_float
 		/// \name Arithmetic operators
 		/// \{
 
-		/// Add halfs.
+		/// Add halves.
 		/// \param x left operand
 		/// \param y right operand
 		/// \return sum of half expressions
 		template<typename T,typename U> typename enable<expr,T,U>::type operator+(T x, U y) { return functions::plus(x, y); }
 
-		/// Subtract halfs.
+		/// Subtract halves.
 		/// \param x left operand
 		/// \param y right operand
 		/// \return difference of half expressions
 		template<typename T,typename U> typename enable<expr,T,U>::type operator-(T x, U y) { return functions::minus(x, y); }
 
-		/// Multiply halfs.
+		/// Multiply halves.
 		/// \param x left operand
 		/// \param y right operand
 		/// \return product of half expressions
 		template<typename T,typename U> typename enable<expr,T,U>::type operator*(T x, U y) { return functions::multiplies(x, y); }
 
-		/// Divide halfs.
+		/// Divide halves.
 		/// \param x left operand
 		/// \param y right operand
 		/// \return quotient of half expressions
@@ -2181,7 +2181,7 @@ namespace half_float
 
 		/// Identity.
 		/// \param arg operand
-		/// \return uncahnged operand
+		/// \return unchanged operand
 		template<typename T> HALF_CONSTEXPR typename enable<T,T>::type operator+(T arg) { return arg; }
 
 		/// Negation.
@@ -2330,28 +2330,28 @@ namespace half_float
 		inline expr exp2(half arg) { return functions::exp2(arg); }
 		inline expr exp2(expr arg) { return functions::exp2(arg); }
 
-		/// Natural logorithm.
+		/// Natural logarithm.
 		/// \param arg function argument
 		/// \return logarithm of \a arg to base e
 //		template<typename T> typename enable<expr,T>::type log(T arg) { return functions::log(arg); }
 		inline expr log(half arg) { return functions::log(arg); }
 		inline expr log(expr arg) { return functions::log(arg); }
 
-		/// Common logorithm.
+		/// Common logarithm.
 		/// \param arg function argument
 		/// \return logarithm of \a arg to base 10
 //		template<typename T> typename enable<expr,T>::type log10(T arg) { return functions::log10(arg); }
 		inline expr log10(half arg) { return functions::log10(arg); }
 		inline expr log10(expr arg) { return functions::log10(arg); }
 
-		/// Natural logorithm.
+		/// Natural logarithm.
 		/// \param arg function argument
 		/// \return logarithm of \a arg plus 1 to base e
 //		template<typename T> typename enable<expr,T>::type log1p(T arg) { return functions::log1p(arg); }
 		inline expr log1p(half arg) { return functions::log1p(arg); }
 		inline expr log1p(expr arg) { return functions::log1p(arg); }
 
-		/// Binary logorithm.
+		/// Binary logarithm.
 		/// \param arg function argument
 		/// \return logarithm of \a arg to base 2
 //		template<typename T> typename enable<expr,T>::type log2(T arg) { return functions::log2(arg); }
@@ -2620,7 +2620,7 @@ namespace half_float
 		/// Multiply by power of two.
 		/// \param arg number to modify
 		/// \param exp power of two to multiply with
-		/// \return \a arg multplied by 2 raised to \a exp
+		/// \return \a arg multiplied by 2 raised to \a exp
 //		template<typename T> typename enable<half,T>::type ldexp(T arg, int exp) { return functions::scalbln(arg, exp); }
 		inline half ldexp(half arg, int exp) { return functions::scalbln(arg, exp); }
 		inline half ldexp(expr arg, int exp) { return functions::scalbln(arg, exp); }
@@ -2636,7 +2636,7 @@ namespace half_float
 		/// Multiply by power of two.
 		/// \param arg number to modify
 		/// \param exp power of two to multiply with
-		/// \return \a arg multplied by 2 raised to \a exp
+		/// \return \a arg multiplied by 2 raised to \a exp
 //		template<typename T> typename enable<half,T>::type scalbn(T arg, int exp) { return functions::scalbln(arg, exp); }
 		inline half scalbn(half arg, int exp) { return functions::scalbln(arg, exp); }
 		inline half scalbn(expr arg, int exp) { return functions::scalbln(arg, exp); }
@@ -2644,7 +2644,7 @@ namespace half_float
 		/// Multiply by power of two.
 		/// \param arg number to modify
 		/// \param exp power of two to multiply with
-		/// \return \a arg multplied by 2 raised to \a exp
+		/// \return \a arg multiplied by 2 raised to \a exp
 //		template<typename T> typename enable<half,T>::type scalbln(T arg, long exp) { return functions::scalbln(arg, exp); }
 		inline half scalbln(half arg, long exp) { return functions::scalbln(arg, exp); }
 		inline half scalbln(expr arg, long exp) { return functions::scalbln(arg, exp); }
@@ -2798,7 +2798,7 @@ namespace half_float
 		inline bool islessequal(expr x, half y) { return functions::islessequal(x, y); }
 		inline bool islessequal(expr x, expr y) { return functions::islessequal(x, y); }
 
-		/// Comarison for less or greater.
+		/// Comparison for less or greater.
 		/// \param x first operand
 		/// \param y second operand
 		/// \retval true if either less or greater
@@ -3027,7 +3027,7 @@ namespace std
 		/// Quiet NaN.
 		static HALF_CONSTEXPR half_float::half quiet_NaN() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7FFF); }
 
-		/// Signalling NaN.
+		/// Signaling NaN.
 		static HALF_CONSTEXPR half_float::half signaling_NaN() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7DFF); }
 
 		/// Smallest positive subnormal value.
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index c025ad770809864ac4e2d2c38e616e3d95e3d96a..76f5dcdfc28e90a3f83435841af21048bcb2a9c0 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -91,7 +91,7 @@ public:
 
     /**
      * @brief Set the node name.
-     * @warning Undefined behaviour when several Nodes have the same name.
+     * @warning Undefined behavior when several Nodes have the same name.
      * @param name New name for the node.
      */
     inline void setName(const std::string &name) { mName = name; }
@@ -184,7 +184,7 @@ public:
     /**
      * @brief List outside data input connections of the GraphView.
      * Data inputs exclude inputs expecting parameters (weights or bias).
-     * The vector size is garanteed to match the number of outside data inputs of the GraphView. If there is
+     * The vector size is guaranteed to match the number of outside data inputs of the GraphView. If there is
      * no external connection to a given input, a pair of nullptr and gk_IODefaultIndex is returned.
      * @return std::vector<std::pair<NodePtr, IOIndex_t>>
      */
@@ -214,7 +214,7 @@ public:
 
     /**
      * @brief List outside output connections of the GraphView. The vector
-     * size is garanteed to match the number of outputs of the GraphView. If there is
+     * size is guaranteed to match the number of outputs of the GraphView. If there is
      * no connection to a given output, the corresponding sub-vector will be empty.
      * @return std::vector<std::pair<NodePtr, IOIndex_t>>
      */
@@ -319,12 +319,12 @@ public:
      * - The childs and parents of the next node in the ranked list are then
      *   added to the list, and so on.
      * - Any remaining nodes have no path to the root node and are added in
-     *   arbitrary order. In this case, the ranking is not garanteed to be unique.
+     *   arbitrary order. In this case, the ranking is not guaranteed to be unique.
      *
-     * If the ranking cannot be garanteed to be unique, the second item indicates
-     * the rank from which unicity cannot be garanteed.
+     * If the ranking cannot be guaranteed to be unique, the second item indicates
+     * the rank from which unicity cannot be guaranteed.
      * @return std::pair<std::vector<NodePtr>, size_t> Pair with the list of ranked
-     * nodes and the size of the ranked sub-list where unicity is garanteed.
+     * nodes and the size of the ranked sub-list where unicity is guaranteed.
     */
     std::pair<std::vector<NodePtr>, size_t> getRankedNodes() const;
 
@@ -394,7 +394,7 @@ public:
      * @param fromOutNode Pointer to the already included Node the new Node will
      * be linked to (it will become a parent of the new Node). If the GraphView
      * only has one output Node, then default to this Node.
-     * @param fromTensor Ouput Tensor ID of the already included Node. Default to
+     * @param fromTensor Output Tensor ID of the already included Node. Default to
      * 0.
      * @param toTensor Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning
      * first available data input for the Node.
@@ -412,7 +412,7 @@ public:
      * be linked to (it will become a parent of the new Node). As a name is
      * optional, ensure such Node is in the GraphView or it will send back an
      * error message.
-     * @param fromTensor Ouput Tensor ID of the already included Node. Default to
+     * @param fromTensor Output Tensor ID of the already included Node. Default to
      * 0.
      * @param toTensor Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning
      * first available data input for the Node.
@@ -489,7 +489,7 @@ public:
      * Both sets should include all the necessary Producers.
      * @details There are 3 cases of replacement:
      * Case 1: same number of input/output connections for oldNodes and newNodes sets.
-     *     - input/output connections are replacated according to their IDs.
+     *     - input/output connections are replicated according to their IDs.
      * Case 2: different number of input/output connections for oldNodes and newNodes sets.
      *     - only a single parent/child node for the newNodes set, every input/output is
      *       connected to it.
diff --git a/include/aidge/graph/Matching.hpp b/include/aidge/graph/Matching.hpp
index b846af10b87b4088dab7fee41187ded91bf531d1..3b0874580b112f4c219886a78677e6c9801b72b8 100644
--- a/include/aidge/graph/Matching.hpp
+++ b/include/aidge/graph/Matching.hpp
@@ -53,7 +53,7 @@ public:
     struct MatchingResult {
         // Mutable is required to allow modifying MatchingResult members with a std::set
         // iterator. Any change should not modify the set ordering.
-        // We use graph->rootNode() as the std::set key, which is garanteed
+        // We use graph->rootNode() as the std::set key, which is guaranteed
         // to never change after insertion!
         mutable std::shared_ptr<GraphView> graph;
         mutable std::map<std::string, std::map<std::string, NodePtr>> anchors;
@@ -134,7 +134,7 @@ public:
      * QUERY = SEQ | NODE_OR_BLOCK (';' (SEQ | NODE_OR_BLOCK))*
      *
      * @param query The query to search.
-     * @param disjoint If true, only keep the longuest disjoint (non-overlapping) matches.
+     * @param disjoint If true, only keep the longest disjoint (non-overlapping) matches.
      * @return std::set<MatchingResult> Set of matches, each stored in a MatchingResult struct.
     */
     std::set<MatchingResult> match(const std::string& query, bool disjoint = false);
@@ -150,7 +150,7 @@ public:
     MatchingResult matchFrom(NodePtr startNode, const std::string& query);
 
     /**
-     * Filter to keep only the longuest disjoint (non-overlapping) matches.
+     * Filter to keep only the longest disjoint (non-overlapping) matches.
     */
     std::set<MatchingResult> filterLonguestDisjoint(const std::set<MatchingResult>& matches);
 
@@ -216,7 +216,7 @@ private:
         bool operator()(const MatchingResult& lhs, const MatchingResult& rhs) const {
             // Some matches size could be the same
             if (lhs.graph->getNodes().size() == rhs.graph->getNodes().size()) {
-                // In this case, use rootNode which is garanteed to be different!
+                // In this case, use rootNode which is guaranteed to be different!
                 return lhs.graph->rootNode() < rhs.graph->rootNode();
             }
 
@@ -226,7 +226,7 @@ private:
 };
 
 inline bool operator<(const Aidge::SinglePassGraphMatching::MatchingResult& lhs, const Aidge::SinglePassGraphMatching::MatchingResult& rhs) {
-    // Matches rootNode are garanteed to be different!
+    // Matches rootNode are guaranteed to be different!
     return lhs.graph->rootNode() < rhs.graph->rootNode();
 }
 }  // namespace Aidge
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 51cc9c444edf03febf4416149e9160df0bbfca9c..a16bbd63ecf52e8c97d5032c5c90a5f69186f995 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -135,7 +135,7 @@ public:
 
   /**
    * @brief Set the Node name.
-   * @warning Undefined behaviour when several Nodes have the same name.
+   * @warning Undefined behavior when several Nodes have the same name.
    * @param name New name for the node.
    */
   void setName(const std::string &name);
@@ -144,7 +144,7 @@ public:
    * @brief Given the parameter name generate a new name which is unique
    * in all the GraphView which contains this node.
    * To generate the new name the method is called recursively and append
-   * the caracter ``_``.
+   * the character ``_``.
    * If no duplicate return name, this is the exit condition.
    * @param name Base name to make unique.
    * @return A unique name in all the GraphView which contains this one.
@@ -191,7 +191,7 @@ public:
   bool valid() const;
 
   /**
-   * @brief List of pair <Parent, ID of the data intput>. When an input is not
+   * @brief List of pair <Parent, ID of the data input>. When an input is not
    * linked to any Parent, the pair is <nullptr, gk_IODefaultIndex>.
    * Data inputs exclude inputs expecting parameters (weights or bias).
    * @return std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>
@@ -240,7 +240,7 @@ public:
 
   /**
    * @brief List input ids of children linked to outputs of the node. The vector
-   * size is garanteed to match the number of outputs of the node. If there is
+   * size is guaranteed to match the number of outputs of the node. If there is
    * no connection to a given output, the corresponding sub-vector will be empty.
    * @return std::vector<std::vector<std::pair<std::shared_ptr<Node>,
    * IOIndex_t>>>
@@ -333,7 +333,7 @@ public:
    * @param outId ID of the current Node output to connect to the other Node.
    * Default to 0.
    * @param otherInId ID of the other Node input to connect to the current Node.
-   * Default to the first avaible data input.
+   * Default to the first available data input.
    */
   void addChild(NodePtr otherNode,
                 const IOIndex_t outId = IOIndex_t(0),
@@ -410,7 +410,7 @@ public:
   bool removeChild(const NodePtr nodePtr, const IOIndex_t outId = 0);
 
   /**
-   * @brief Remove every link of surrounding nodes to it and conversly
+   * @brief Remove every link of surrounding nodes to it and conversely
    */
   void resetConnections(bool includeLearnableParam = false);
 
@@ -546,7 +546,7 @@ private:
    */
   void addParent(const NodePtr otherNode, const IOIndex_t inId);
 
-  // OPERATOR FUNCTIONNAL but commented out to avoid iostream inclusion
+  // OPERATOR FUNCTIONAL but commented out to avoid iostream inclusion
   // /**
   //  * @brief operator<< overload to ease print & debug of nodes
   //  * @param[inout] ostream to print to
diff --git a/include/aidge/graph/StaticAnalysis.hpp b/include/aidge/graph/StaticAnalysis.hpp
index d92356b72b8f1408c3084f9afa6f467d2043e620..d3fe681749eeb69e4816a38f302d510f1c81381a 100644
--- a/include/aidge/graph/StaticAnalysis.hpp
+++ b/include/aidge/graph/StaticAnalysis.hpp
@@ -49,7 +49,7 @@ public:
      * accesses are not included.
      * A naive implementation is considered (more operations might be required 
      * for numerical stability in an actual implementation).
-     * Example of Operator with only arithmetic operatons: Conv.
+     * Example of Operator with only arithmetic operations: Conv.
      * 
      * @return size_t Number of arithmetic operations.
      */
@@ -62,7 +62,7 @@ public:
      * accesses are not included.
      * A naive implementation is considered (more operations might be required 
      * for numerical stability in an actual implementation).
-     * Example of Operator with only logic operatons: BitShift.
+     * Example of Operator with only logic operations: BitShift.
      * 
      * @return size_t Number of logic operations.
      */
@@ -75,7 +75,7 @@ public:
      * accesses are not included.
      * A naive implementation is considered (more operations might be required 
      * for numerical stability in an actual implementation).
-     * Example of Operator with only comparison operatons: MaxPool.
+     * Example of Operator with only comparison operations: MaxPool.
      * 
      * @return size_t Number of comparison operations.
      */
@@ -88,7 +88,7 @@ public:
      * accesses are not included.
      * A naive implementation is considered (more operations might be required 
      * for numerical stability in an actual implementation).
-     * Example of Operator with only NL operatons: Tanh.
+     * Example of Operator with only NL operations: Tanh.
      * Non-linear operations are necessarily of floating-point type.
      * 
      * @return size_t Number of non-linear (NL) operations.
diff --git a/include/aidge/graphRegex/GraphParser.hpp b/include/aidge/graphRegex/GraphParser.hpp
index 2c25ac0b76368242891e6e5ba92c2c5fc913a23c..b165891ff1c8a55e565e3520813b707303ddfd1f 100644
--- a/include/aidge/graphRegex/GraphParser.hpp
+++ b/include/aidge/graphRegex/GraphParser.hpp
@@ -17,7 +17,7 @@ class GraphParser {
 public:
     /**
      * @brief AST graph creation function
-     * @param gRegexExpressions String representing the logical fuction to be performed
+     * @param gRegexExpressions String representing the logical function to be performed
      */
     GraphParser(const std::string gRegexExpressions);
 
diff --git a/include/aidge/graphRegex/GraphRegex.hpp b/include/aidge/graphRegex/GraphRegex.hpp
index 573447cf934b196e8b0c32d7a58e1977f5aa5f9a..f0f8e68e41a09cb54fb7528cb7f6ce065674af02 100644
--- a/include/aidge/graphRegex/GraphRegex.hpp
+++ b/include/aidge/graphRegex/GraphRegex.hpp
@@ -68,14 +68,14 @@ class GraphRegex{
 
     /**
      *  @brief brief match the queries in the graph 
-     *  @param ref the graph were the querys in search 
+     *  @param ref the graph where the queries are searched
      *  @return the result  
     */
     std::set<std::shared_ptr<MatchSolution>> match(std::shared_ptr<GraphView> ref);
 
     /***
-     *  @brief  match the queries in the graph and applied the recipes fuction  
-     *  @param ref the graph were the querys in search 
+     *  @brief  match the queries in the graph and apply the recipes function
+     *  @param ref the graph where the queries are searched
     */
     void appliedRecipes(std::shared_ptr<GraphView> ref);
 
diff --git a/include/aidge/graphRegex/matchFsm/FsmEdge.hpp b/include/aidge/graphRegex/matchFsm/FsmEdge.hpp
index a6cc3e59247d4be98caa9881182bfba1c44e0178..6397da49478c44ef6050c5bad77f12ba10efaca7 100644
--- a/include/aidge/graphRegex/matchFsm/FsmEdge.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmEdge.hpp
@@ -60,7 +60,7 @@ namespace Aidge{
         virtual const EdgeTestResult test(const std::shared_ptr<FsmRunTimeContext> stmContext) =0;
 
         /**
-        *  @brief test is the egde test a common node
+        *  @brief test if the edge tests a common node
         *  @return true if is a common
         */
         virtual bool isCommon(void);
@@ -70,7 +70,7 @@ namespace Aidge{
         */
         virtual size_t getCommonIdx(void);
         /**
-         * @brief get the relative postion to the common node deffine in this edge
+         * @brief get the relative position to the common node defined in this edge
          * @return map
         */
         const std::map<size_t,int>& getRelative(void);
@@ -116,7 +116,7 @@ namespace Aidge{
     };
 
     /**
-     * @brief class specialization for not commun node (node that must be match one Unique) transition
+     * @brief class specialization for a non-common node (node that must match one unique node) transition
     */
     class FsmEdgeUnique:public FsmEdge
     {
@@ -127,7 +127,7 @@ namespace Aidge{
     };
 
     /**
-     * @brief class specialization for  commun node transition
+     * @brief class specialization for  common node transition
      * @see FsmEdge
     */
     class FsmEdgeCommon:public FsmEdge
@@ -135,7 +135,7 @@ namespace Aidge{
 
         private:
         /**
-         * @brief the map that defind the ralation between the commonKey find by the lexer and a unique id use to refer to the common node
+         * @brief the map that defines the relation between the commonKey found by the lexer and a unique id used to refer to the common node
         */
         static std::map<std::string,int> mCommonIdxMap;
         /**
@@ -145,7 +145,7 @@ namespace Aidge{
         public:
 
         /**
-         * @brief constructor  commun node ,
+         * @brief constructor  common node ,
          * @details during construction,
          * the node key found by the lexer is converted to a unique id and the relative positions are updated.
         */
@@ -159,7 +159,7 @@ namespace Aidge{
 
 
     /**
-     * @brief class spesialisation for ref transition
+     * @brief class specialization for ref transition
      * @see FsmEdge
     */
     class FsmEdgeRef:public FsmEdge
diff --git a/include/aidge/graphRegex/matchFsm/FsmGraph.hpp b/include/aidge/graphRegex/matchFsm/FsmGraph.hpp
index d718009e87e5360981ff93ff808124581917c089..e7402b3f0973e4b9e7053b4d59c9ff63ca6dd496 100644
--- a/include/aidge/graphRegex/matchFsm/FsmGraph.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmGraph.hpp
@@ -49,7 +49,7 @@ public:
 
     /**
      * @brief get the set of the valid states
-     * @return set of valide state
+     * @return set of valid states
     */
     const std::set<std::shared_ptr<FsmNode>> getValidNodes(void);
 
@@ -60,7 +60,7 @@ public:
     const std::set<std::shared_ptr<FsmNode>> getNodes(void);
 
     /**
-     * @brief set a groupe idx for all the nodes in the graph
+     * @brief set a group idx for all the nodes in the graph
     */
     void setGroupe(std::size_t groupeIdx);
 
diff --git a/include/aidge/graphRegex/matchFsm/FsmNode.hpp b/include/aidge/graphRegex/matchFsm/FsmNode.hpp
index 7987c5ce33522ca7d43de1918d53e68738af6d18..f4636e0e025d26fa2afae88b6ffca28a511e9509 100644
--- a/include/aidge/graphRegex/matchFsm/FsmNode.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmNode.hpp
@@ -31,10 +31,10 @@ namespace Aidge{
     /**
      * @brief is a node in the FSM graph, it's a state in the FSM
      * @details a state can be and/or :
-     * - a valide state, the match is valide if it stop on this edge
+     * - a valid state, the match is valid if it stops on this edge
      * - a start state , the match start on this state
      * The state is also define by this Origin (is the unique id of it's expretion )
-     * and it's groupe (for inner expression TODO)
+     * and its group (for inner expression TODO)
     */
     class FsmNode : public std::enable_shared_from_this<FsmNode>
     {
@@ -84,7 +84,7 @@ namespace Aidge{
 
         bool isValid(void);
         bool isStart(void);
-        void unValid(void);
+        void invalid(void);
         void valid(void);
         void unStart(void);
         void start(void);
diff --git a/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp b/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp
index 36d09db47d23395d649a688252f2af803cb1bc9d..0b44172be0c3b671043fda884efadb84ba46e215 100644
--- a/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp
@@ -40,7 +40,7 @@ private:
     */
     std::map<NodePtr,std::size_t> mCommonNodes;
     /**
-     * @brief the map of the node that as been valid in this context , and the test that valide the node
+     * @brief the map of the nodes that have been validated in this context, and the test that validated the node
     */
     std::map<std::shared_ptr<ConditionalInterpreter>,std::set<NodePtr>> mValidNodes;
     /**
@@ -52,7 +52,7 @@ public:
      * @brief constructor
      * @param actState the actual state in the FSM
      * @param actOpNode the actual node in the graph
-     * @param idxRejeced the idx in the global regected node vector init max() as sentinel value of undefind
+     * @param idxRejeced the idx in the global rejected node vector, init max() as sentinel value of undefined
     */
     FsmRunTimeContext(std::shared_ptr<FsmNode> actState ,NodePtr actOpNode ,std::size_t idxRejeced =std::numeric_limits<std::size_t>::max() );
     FsmRunTimeContext(std::shared_ptr<FsmRunTimeContext> fsmRunTime);
@@ -85,7 +85,7 @@ public:
 
     /**
      * @ingroup FsmRunTimeContextTest
-     * @brief test if the actual state is valide
+     * @brief test if the actual state is valid
      * @return bool
      */
     bool isOnValidState(void);
diff --git a/include/aidge/nodeTester/ConditionalData.hpp b/include/aidge/nodeTester/ConditionalData.hpp
index 12df32a728571678a3885f9981e526e1d73db785..c6c521bd9c3e1a0333bb2a6c38545bb2bf6f3fe6 100644
--- a/include/aidge/nodeTester/ConditionalData.hpp
+++ b/include/aidge/nodeTester/ConditionalData.hpp
@@ -12,7 +12,7 @@ namespace Aidge{
 
 
 /////////////////////////
-// The data type in AST Intepretation
+// The data type in AST Interpretation
 ////////////////////////
 
 class BaseConditionalValue {
diff --git a/include/aidge/nodeTester/ConditionalInterpreter.hpp b/include/aidge/nodeTester/ConditionalInterpreter.hpp
index af6a3b920bb9ca389724860d55250d7ef4540677..713a166ec2cea7781ce98c850ecbf587eca58678 100644
--- a/include/aidge/nodeTester/ConditionalInterpreter.hpp
+++ b/include/aidge/nodeTester/ConditionalInterpreter.hpp
@@ -37,7 +37,7 @@ class ConditionalRegisterFunction {
      */
     template <typename T>
     T safeCastInput( std::shared_ptr<ConditionalData> data) {
-        //cnvertion and type cheking
+        //conversion and type checking
         if (data->isTypeEqualTo<T>()){
             return data->getValue<T>();
         }else{
@@ -123,7 +123,7 @@ class ConditionalRegisterFunction {
      */
     template <class F, std::size_t... ParamsIdx>
     auto funcPointer(F f, std::index_sequence<ParamsIdx...>) {
-        //wrapp the lambda in a new one that as ConditionalData as inputs and output
+        //wrap the lambda in a new one that has ConditionalData as inputs and output
     	return [this,f](std::vector< std::shared_ptr<ConditionalData>>  &args) {
             if (args.size() < sizeof...(ParamsIdx)){
                 std::ostringstream errorMessage;
@@ -199,10 +199,10 @@ class ConditionalRegisterFunction {
      /**
      * @brief Runs the function associated with the given key, using the provided vector of input data.
      * @param key The key of the function to run.
-     * @param datas The vector of input data.
+     * @param data The vector of input data.
      * @return A pointer to the output ConditionalData object.
      */
-     std::shared_ptr<ConditionalData> run(const std::string key,std::vector< std::shared_ptr<ConditionalData>> & datas);
+     std::shared_ptr<ConditionalData> run(const std::string key,std::vector< std::shared_ptr<ConditionalData>> & data);
 
     bool isLambdaRegister(const std::string &key) {
         if(mWlambda.find(key) != mWlambda.end()){
@@ -237,7 +237,7 @@ class ConditionalInterpreter
      */
     std::shared_ptr<AstNode<ConditionalTokenTypes>> mTree;
     /**
-     * @brief the registery for the lambda fuction
+     * @brief the registry for the lambda function
      * @see ConditionalRegisterFunction
     */
     ConditionalRegisterFunction mLambdaRegister;
@@ -275,8 +275,8 @@ class ConditionalInterpreter
 
     /**
      * @brief Test a node depending of the ConditionalExpressions
-     * @details the AST is visit using \ref visit() whith the $ init whit the nodeOp
-     * @return bool the match node has the initialized expresion
+     * @details the AST is visited using \ref visit() with the $ init with the nodeOp
+     * @return bool the match node has the initialized expression
      * @see visit() This function uses the visit() function to perform the evaluation.
      */
     bool test( const NodePtr nodeOp);
@@ -295,7 +295,7 @@ class ConditionalInterpreter
     private:
     /**
      * @brief Recursive AST traversal function, using the for interpreting AST nodes function,
-     * using \ref ASTnodeInterpreterF fuctions
+     * using \ref ASTnodeInterpreterF functions
      * @param NodeOp The node currently being tested
      * @param nodes The AST given by the parsing process
      */
diff --git a/include/aidge/nodeTester/ConditionalLexer.hpp b/include/aidge/nodeTester/ConditionalLexer.hpp
index fcfb9ebe783ac719076ce675e6fc3d78caf5be07..0cf15d968bb6dae7532a1bcbb6c77b98ba0e42c6 100644
--- a/include/aidge/nodeTester/ConditionalLexer.hpp
+++ b/include/aidge/nodeTester/ConditionalLexer.hpp
@@ -65,7 +65,7 @@ private:
 
 /**
  * @brief Constructs an error message to display the character not understood by the lexer
- * @return error mesage
+ * @return error message
  */
 std::runtime_error badTokenError(const std::string& currentChars,std::size_t position);
 
diff --git a/include/aidge/nodeTester/ConditionalParser.hpp b/include/aidge/nodeTester/ConditionalParser.hpp
index 1f3671ea5b68008a67be5d6a63d09051d49939d5..06b0e112cfe9bba6a4f0bf32eb1b793326a357f8 100644
--- a/include/aidge/nodeTester/ConditionalParser.hpp
+++ b/include/aidge/nodeTester/ConditionalParser.hpp
@@ -34,7 +34,7 @@ class ConditionalParser {
     public:
     /**
      * @brief AST graph creation function
-     * @param ConditionalExpressions String representing the logical fuction to be performed
+     * @param ConditionalExpressions String representing the logical function to be performed
      */
     ConditionalParser(const std::string ConditionalExpressions);
 
@@ -87,7 +87,7 @@ class ConditionalParser {
     std::shared_ptr<AstNode<ConditionalTokenTypes>> constructAstLambda(void);
     /**
     * @ingroup ParsingFunctions
-    * @brief Function of grammar rules for a expresion : cmpr ((AND | OR) cmpr)*
+    * @brief Function of grammar rules for an expression : cmpr ((AND | OR) cmpr)*
     * @return AST node
     */
     std::shared_ptr<AstNode<ConditionalTokenTypes>> constructAstExpr(std::size_t precLimit = 0);
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index a799153e1db5eb83964ed06dd3bc0fb06da64de8..e9988b4421b785a91ec170796be49c0c8df52142 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -149,7 +149,7 @@ public:
 
     /**
      * @brief Minimum amount of data from a specific input for one computation pass.
-     * @param inputIdx Index of the input analysed.
+     * @param inputIdx Index of the input analyzed.
      * @return Elts_t
      */
     virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
@@ -163,7 +163,7 @@ public:
     /**
      * @brief Total amount of consumed data from a specific input.
      *
-     * @param inputIdx Index of the input analysed.
+     * @param inputIdx Index of the input analyzed.
      * @return Elts_t
      */
     virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const;
@@ -171,7 +171,7 @@ public:
     /**
      * @brief Total amount of produced data ready to be used on a specific output.
      *
-     * @param outputIdx Index of the output analysed.
+     * @param outputIdx Index of the output analyzed.
      * @return Elts_t
      */
     virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const;
@@ -207,8 +207,8 @@ public:
     inline IOIndex_t nbOutputs() const noexcept { return mNbOut; };
 
     /**
-     * @brief Set the back edge input indexes for recurring operators.
-     * Any recuring operators should specify it's back edges, otherwise
+     * @brief Set the back edge input indexes for recurring operators.
+     * Any recurring operator should specify its back edges, otherwise
      * the interpretation of the data flow graph may not be possible.
      */
     inline void setBackEdges(const std::set<IOIndex_t>& backEdges) { mBackEdges = backEdges; }
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index c8cdd93810e18bd3cdd0a2d080e54aae2d787c66..19e2f13e4ff39fee181c6ad0cf2fbab510f22c3e 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -92,7 +92,7 @@ public:
 	 * @brief Will compute the dimensions of operator's output tensor given the input sizes
  	 *        If the output dimensions cannot be computed because it depends on some undefined inputs then forwardDims will return false and enter in TOKEN mode for subsequent tensors.
  	 *        - TOKEN mode means that forwarddims will only ensure that all inputs and outputs of the graph the node is within are connected.
- 	 * @param[in] allowDataDependency if set to true, this means that this operator output dimensions depends on the dimensions of optionnal parameter tensors.
+ 	 * @param[in] allowDataDependency if set to true, this means that this operator output dimensions depends on the dimensions of optional parameter tensors.
  	 * @return true if dims have been properly forwarded. false otherwise. If set to false, then forwardDims will enter in token mode.
  	 *      
      */
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 811402420df170c011e478148cf646e6c585cc84..055e6fd1d8917ae015b88a223f1f8701fd9dce59 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -87,7 +87,7 @@ public:
 };
 
 /**
- * @brief Exract a sub-Tensor from a bigger original Tensor.
+ * @brief Extract a sub-Tensor from a bigger original Tensor.
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 8c3a111c42dfeb2b4e27269839e41f3b362bdda3..5a5652388c3622bf8a46792b3c58e00c79de22f3 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -84,7 +84,7 @@ public:
 };
 
 /**
- * @brief Exract a sub-Tensor from a bigger original Tensor.
+ * @brief Extract a sub-Tensor from a bigger original Tensor.
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index 64a775eb4209ecad0e29decd8336ebb77bbe652f..5c966edaf27271da79f9950cdf007cfcf446dd8d 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -53,7 +53,7 @@ enum class SqueezeAttr {
  * @brief This operator has as purpose to remove dummy dimensions around given
  * axes.
  * input#0 : Tensor to squeeze
- * input#1 Optionnal : 1D tensor that lists the axes to squeeze
+ * input#1 Optional : 1D tensor that lists the axes to squeeze
  * @note the axes to squeeze can either be given via attribute or via input #1,
  * for the sake of simplicity of the example unders, the axes to squeeze are
  * given via attribute
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index beeca8d72a2067ed2dfcd98cf3d9ff0cb7b6ff3a..c6341e934ea415cb23a7d4ce201351a0825e6081 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -40,7 +40,7 @@ public:
 
 enum class TransposeAttr {
   /**
-   * @brief order of the ouput dims from the input dims. If left empty,
+   * @brief order of the output dims from the input dims. If left empty,
    * the dimensions of input will be reversed.
    */
     OutputDimsOrder
diff --git a/include/aidge/scheduler/MemoryManager.hpp b/include/aidge/scheduler/MemoryManager.hpp
index 2e397d1dbaa1cc8d8f586d15363cbd2245963152..880498515f36da4ecdf7f92aa7375981d5c67d10 100644
--- a/include/aidge/scheduler/MemoryManager.hpp
+++ b/include/aidge/scheduler/MemoryManager.hpp
@@ -33,7 +33,7 @@ namespace Aidge {
  * - A MemoryPlane is tailored for handling (N)HWC data with two properties:
  *   - Possibility of wrapping: on the H axis (each W*C block is contiguous).
  *   - Possibility of concatenation: on the C axis (C1+C2+...+Cn).
- * - All the sizes and offets specified in a MemoryManager are expressed in
+ * - All the sizes and offsets specified in a MemoryManager are expressed in
  *   number of data elements, or **words**, meaning currently a uniform data 
  *   precision is expected in a MemoryManager (for instance, if the precision is
  *   16-bits, each data element will be 2 bytes, which will be the size of a word).
@@ -95,9 +95,9 @@ public:
      *   with different size, like NHWC = NHW(C1+C2):
      *   - MemoryPlane#1: \p size = C1 and \p stride = C=C1+C2
      *   - MemoryPlane#2: \p size = C2 and \p stride = C=C1+C2
-     *                    (with an additionnal relative offset of +C1)
+     *                    (with an additional relative offset of +C1)
      * In this mode, wrapping can only occur on the H (\p count) axis. W*C chunks
-     * are garanteed to be contiguous (\p length * \p stride).
+     * are guaranteed to be contiguous (\p length * \p stride).
      * 
      * By default, \p stride = \p size, \p count = 1 and \p length = 1, meaning
      * there is no NHWC layout and the MemoryPlane can be wrapped **anywhere**.
@@ -255,7 +255,7 @@ public:
         /// with different size, like NHWC = NHW(C1+C2):
         /// - MemoryPlane#1: \p size = C1 and \p stride = C=C1+C2
         /// - MemoryPlane#2: \p size = C2 and \p stride = C=C1+C2
-        ///                  (with an additionnal relative offset of +C1)
+        ///                  (with an additional relative offset of +C1)
         /// By default, \p stride = \p size, \p count = 1 and \p length = 1, meaning
         /// there is no NHWC layout and the MemoryPlane can be wrapped **anywhere**.
         /// In this case, \p size is the total size of the MemoryPlane (H*W*C, in words).
@@ -350,7 +350,7 @@ public:
                            unsigned int length = 1,
                            unsigned int count = 1);
     /// Generate a new MemoryPlane directly following an existing MemoryPlane
-    /// memPlane with an additionnal offset extraOffset
+    /// memPlane with an additional offset extraOffset
     MemoryPlane reallocate(const MemoryPlane& memPlane,
                            unsigned int extraOffset,
                            unsigned int size,
@@ -375,7 +375,7 @@ public:
                             unsigned int length = 1,
                             unsigned int count = 1);
     /// Generate a new MemoryPlane directly following an existing MemoryPlane
-    /// memPlane with an additionnal offset extraOffset
+    /// memPlane with an additional offset extraOffset
     unsigned int reallocate(const MemoryPlane& memPlane,
                             const std::shared_ptr<Node>& node,
                             unsigned int extraOffset,
diff --git a/include/aidge/scheduler/ProdConso.hpp b/include/aidge/scheduler/ProdConso.hpp
index fce8d7f6548aaeb04300291d33cc2a5e44fb6fe7..cfc83cbf91cb7eeef2a3bbb0a4c5017a2480fe9b 100644
--- a/include/aidge/scheduler/ProdConso.hpp
+++ b/include/aidge/scheduler/ProdConso.hpp
@@ -37,7 +37,7 @@ public:
      * @brief Minimum amount of data from a specific input required by the
      * implementation to be run.
      *
-     * @param inputIdx Index of the input analysed.
+     * @param inputIdx Index of the input analyzed.
      * @return std::size_t
      */
     virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
@@ -55,7 +55,7 @@ public:
     /**
      * @brief Total amount of consumed data from a specific input.
      *
-     * @param inputIdx Index of the input analysed.
+     * @param inputIdx Index of the input analyzed.
      * @return DimSize_t
      */
     virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const;
@@ -63,19 +63,19 @@ public:
     /**
      * @brief Total amount of produced data ready to be used on a specific output.
      *
-     * @param outputIdx Index of the output analysed.
+     * @param outputIdx Index of the output analyzed.
      * @return DimSize_t
      */
     virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const;
 
     /**
-     * @brief Update the Consummer Producer system by simulating the consumption and production of i/o
+     * @brief Update the Consumer Producer system by simulating the consumption and production of i/o
      *
      */
     virtual void updateConsummerProducer();
 
     /**
-     * @brief Reset the Consummer Producer system.
+     * @brief Reset the Consumer Producer system.
      *
      */
     virtual void resetConsummerProducer();
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 2d03f4e8b8d5ce9c74f1d140a2e13317decc8dac..6e08885fc3f8966fba48be1c55a6965ac9e70775 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -190,7 +190,7 @@ protected:
 
     /**
      * @brief Generate an initial base scheduling for the GraphView.
-     * The scheduling is entirely sequential and garanteed to be valid w.r.t.
+     * The scheduling is entirely sequential and guaranteed to be valid w.r.t.
      * each node producer-consumer model.
      * @return Vector of pointers to `StaticSchedulingElement` representing the base schedule.
     */
diff --git a/include/aidge/stimuli/Stimulus.hpp b/include/aidge/stimuli/Stimulus.hpp
index 3def790b65f441c567e5d43150f465233cb64557..af21d7912314c3eea1217811ae3e2b2da47a7a66 100644
--- a/include/aidge/stimuli/Stimulus.hpp
+++ b/include/aidge/stimuli/Stimulus.hpp
@@ -23,8 +23,8 @@
 
 namespace Aidge {
 /**
- * @brief Stimulus. A class wrapping a data sample. Stimulus has two functioning modes. The first mode enables to load data samples from a dataPath and optionnaly store the data in-memory. The second mode enables to store a data sample that was already loaded in memory.
- * @details When Stimulus is used in the first mode, the loading function is determined automaticaly based on the backend and the file extension.
+ * @brief Stimulus. A class wrapping a data sample. Stimulus has two functioning modes. The first mode enables to load data samples from a dataPath and optionally store the data in-memory. The second mode enables to store a data sample that was already loaded in memory.
+ * @details When Stimulus is used in the first mode, the loading function is determined automatically based on the backend and the file extension.
  */
 class Stimulus : public Registrable<Stimulus, std::tuple<std::string, std::string>, std::function<std::unique_ptr<StimulusImpl>(const std::string&)>> {
 private:
diff --git a/include/aidge/utils/ArrayHelpers.hpp b/include/aidge/utils/ArrayHelpers.hpp
index 6648c654d28197dc018b94e8fa300366af52db4a..45a4c3c37da59e369bae2bb7e934c54bd844088d 100644
--- a/include/aidge/utils/ArrayHelpers.hpp
+++ b/include/aidge/utils/ArrayHelpers.hpp
@@ -85,7 +85,7 @@ constexpr std::array<T, N + 1> append(T t, std::array<T, N> a, std::index_sequen
  * @details append({1,2,7}, 3) -> {1,2,7,3}
  *
  * @tparam T Data type.
- * @tparam N Number of elements in the initilial array.
+ * @tparam N Number of elements in the initial array.
  * @param a Initial array.
  * @param t Element to add.
  * @return constexpr std::array<T, N + 1>
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
index fd29bf4ce57ac94e0860172d2d1c15dc40f15ae0..f73de2ea31681a60c85229102a9b2ebbce4d3c3e 100644
--- a/include/aidge/utils/Attributes.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -69,12 +69,12 @@ public:
     virtual std::map<std::string, future_std::any> getAttrs() const = 0;
 
 #ifdef PYBIND
-    /* Bindable get function, does not recquire any templating.
+    /* Bindable get function, does not require any templating.
     *  This is thanks to py::object which allow the function to
     *  be agnostic from its return type.
     */
     virtual py::object getAttrPy(const std::string& name) const  = 0;
-    /* Bindable set function, does not recquire any templating.
+    /* Bindable set function, does not require any templating.
     *  This is thanks to py::object which allow the function to
     *  be agnostic from ``value`` type.
     */
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 1b55d7afbf8263a77cf70752fc92f72ef5027904..3ecd4da393eaac9881d008e27989a52e883ecb6a 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -48,7 +48,7 @@ public:
      * \param name Attribute name
      * \details assert if T is not the actual Attribute type or if the Attribute does not
      *  exist
-     * \note at() throws if the Attribute does not exist, using find to test for Attribute existance
+     * \note at() throws if the Attribute does not exist, using find to test for Attribute existence
      */
     template<class T> T getAttr(const std::string& name) const
     {
@@ -246,7 +246,7 @@ public:
 #ifdef PYBIND
     /**
      * @detail See https://github.com/pybind/pybind11/issues/1590 as to why a
-     * generic type caster for std::any is not feasable.
+     * generic type caster for std::any is not feasible.
      * The strategy here is to store a cast() function for each attribute type ever used.
     */
     inline py::object getAttrPy(const std::string& name) const override final {
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index 794f14124436668cd9ab0895ff602b8d43ad5dcc..d6851f1e42233f9d8af88d10da9046f73f94b8c4 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -24,7 +24,7 @@
 
 namespace Aidge {
 /**
- * Helper to define a context anywhere, hidding the scoped variable name
+ * Helper to define a context anywhere, hiding the scoped variable name
  * which has no relevance.
  */
 #define AIDGE_LOG_CONTEXT(...)                                                \
@@ -59,7 +59,7 @@ class Log {
     /**
      * Detailed messages for debugging purposes, providing information helpful
      * for developers to trace and identify issues.
-     * Detailed insights of what is appening in an operation, not useful for
+     * Detailed insights of what is happening in an operation, not useful for
      * the end-user. The operation is performed nominally.
      * @note This level is disabled at compile time for Release, therefore
      * inducing no runtime overhead for Release.
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 0468ae2616997c306bbd475fe6eb73cc033b0bcc..28dab05f80a64f12a59dc1f684652f66a96dc95f 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -14,7 +14,7 @@
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
-#include <pybind11/stl.h> // declare_registrable key can recquire stl
+#include <pybind11/stl.h> // declare_registrable key can require stl
 #include <pybind11/functional.h>// declare_registrable allow binding of lambda fn
 
 #endif
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 636863e292eeb677055dea379441ce422a6c90d8..439d2c638731b40bec0696a73b62b99e3bfddd41 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -53,7 +53,7 @@ public:
 */
 
     // Constructor for Attributes initialization.
-    // Compile-time garantee that every attribute is initialized.
+    // Compile-time guarantee that every attribute is initialized.
     template <ATTRS_ENUM ...attrsEnum> // non-type attribute pack
     constexpr StaticAttributes(const attr<attrsEnum>&&... attrs) {
         // Check number of attrs consistency
@@ -188,7 +188,7 @@ public:
     //////////////////////////////////////
     ///     Generic Attributes API
     //////////////////////////////////////
-    // Runtime existance check with name
+    // Runtime existence check with name
     bool hasAttr(const std::string& name) const override final {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
             if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp
index 88312280d572302ecce4157c34db0ba1efd52da9..e287db4e8724f0388c13d438fc2e152fe69021cd 100644
--- a/include/aidge/utils/TensorUtils.hpp
+++ b/include/aidge/utils/TensorUtils.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
  * @tparam T should correspond to the type of the tensor, define the type of the absolute and relative error
  * @param t1  first :cpp:class:`Aidge::Tensor` to test
  * @param t2  second :cpp:class:`Aidge::Tensor` to test
- * @param relative relative difference allowed (should be betwen 0 and 1)
+ * @param relative relative difference allowed (should be between 0 and 1)
  * @param absolute absolute error allowed (shoulmd be positive)
  * @return true if both tensor are approximately equal and have the datatype, shape. Else return false
  */
diff --git a/include/aidge/utilsParsing/ParsingToken.hpp b/include/aidge/utilsParsing/ParsingToken.hpp
index e303a5eabe6f7710873468f8edc8f3e844f4175f..8a1a740bf9bd4596675ef2dbd3e30af7765ffaa8 100644
--- a/include/aidge/utilsParsing/ParsingToken.hpp
+++ b/include/aidge/utilsParsing/ParsingToken.hpp
@@ -16,7 +16,7 @@ namespace Aidge{
         /**
          * @brief Token container
          * @param type one of the token type
-         * @param lexeme String representing aditional information of the token
+         * @param lexeme String representing additional information of the token
          */
         ParsingToken(const EnumType type , const std::string lexeme ):mLexeme(lexeme),mType(type){}
 
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 0ac42f507b722d5006a36ea59816766d54164c8d..0d4ed716ca6c65c2e8a0153a729ebecef771ea9e 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -251,7 +251,7 @@ static T castToNativeType(const py::object val_obj) {
 }
 
 static void addScalarCtor(pyTensorClass& mTensor) {
-    // Contructor based on bare py::object in order to match either
+    // Constructor based on bare py::object in order to match either
     // python scalars (int, float) or numpy scalars (np.int32, np.int64, ...).
     // There is a merge request to support numpy scalars in pybind, through py::numpy_scalar<T>
     // though it is not merged: https://github.com/pybind/pybind11/pull/3544/.
@@ -550,7 +550,7 @@ void init_Tensor(py::module& m){
     //   - np.ndarray of a given np.dtype: it will create an equivalent tensor of dtype == np.dtype when supported
     //   - np.dtype scalar: it will create an equivalent scalar tensor of dtype == np.dtype when supported
     //
-    // In order to implement this, we provide several overloads which are carefully ordered in order to fullfil
+    // In order to implement this, we provide several overloads which are carefully ordered in order to fulfill
     // the above requirements.
     //
 
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 4b9d2ad545c47971b7c0dff029585bb4c9ae5638..60d80e783d2e7d2e50d5f832b3508bf065edb707 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -95,7 +95,7 @@ void init_GraphView(py::module& m) {
           :type to_other_node: Node
           :param from_out_node: Node inside the GraphView the new Node will be linked to (it will become a parent of the new Node). If the GraphView only has one output Node, then default to this Node.
           :type from_out_node: Node
-          :param from_tensor: Ouput Tensor ID of the already included Node. Default to 0.
+          :param from_tensor: Output Tensor ID of the already included Node. Default to 0.
           :type from_tensor: int
           :param to_tensor: Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning first available data input for the Node.
           :type to_tensor: int
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index 69a28960b57e6ba2ac8a699bf45ff09961fa4135..1f67b48a08d2fbe5db39436f599ecfc0f236268c 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -76,7 +76,7 @@ void init_Node(py::module& m) {
     :type other_node: :py:class: Node
     :param out_id: ID of the output of the current Node to connect to the other Node. (If Node has 1 output max ID is 0). Default to 0.
     :type out_id: int
-    :param other_in_id: ID of the input of the other Node to connect to the current Node (If the node is a Mul op it has 2 input then Max ID is 1).Default to the first avaible data input.
+    :param other_in_id: ID of the input of the other Node to connect to the current Node (If the node is a Mul op it has 2 input then Max ID is 1).Default to the first available data input.
     :type other_in_id: int
     )mydelimiter")
 
@@ -128,7 +128,7 @@ void init_Node(py::module& m) {
     R"mydelimiter(
     Get, for each output of the Node, a list of the children Node and the associated input index connected to it.
 
-    :return: List of a list of connections. When an outut is not linked to any child,  its list a empty.
+    :return: List of a list of connections. When an output is not linked to any child, its list is empty.
     :rtype: list[list[tuple[Node, int]]]
     )mydelimiter")
 
@@ -187,7 +187,7 @@ void init_Node(py::module& m) {
             for (const auto &arg : args) {
                 // Check if the argument is an instance of Connector
                 if (pybind11::isinstance<Connector>(arg)) {
-                    // Convert Python object to C++ object adn push it ot vector
+                    // Convert Python object to C++ object and push it to vector
                     connectors.push_back(arg.cast<Connector>());
                 }
                 else if (arg.is(py::none())) {
diff --git a/python_binding/operator/pybind_ConstantOfShape.cpp b/python_binding/operator/pybind_ConstantOfShape.cpp
index b0d5ef2ef78380422ca1a137608f5289fa519aed..189337a384d55c91f6aceeb97c530ed92ef7b4d0 100644
--- a/python_binding/operator/pybind_ConstantOfShape.cpp
+++ b/python_binding/operator/pybind_ConstantOfShape.cpp
@@ -24,7 +24,7 @@ namespace Aidge {
 void init_ConstantOfShape(py::module &m) {
   py::class_<ConstantOfShape_Op, std::shared_ptr<ConstantOfShape_Op>, OperatorTensor>(
       m, "ConstantOfShapeOp", py::multiple_inheritance())
-      // Here we bind the methods of the Unsqueeze_Op that wil want to access
+      // Here we bind the methods of the Unsqueeze_Op that will want to access
       .def("get_inputs_name", &ConstantOfShape_Op::getInputsName)
       .def("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
       .def("value", &ConstantOfShape_Op::value);
diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
index 40c179c4064f07896113732a7e3c32db5f19c060..b61cb40cedbb5bfbc197c401454f205c737bc6ee 100644
--- a/python_binding/operator/pybind_Unsqueeze.cpp
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -28,7 +28,7 @@ void init_Unsqueeze(py::module &m) {
 						with r = input_tensor.nbDims() + len(axes)
 		:type axes : :py:class: List[Int]
 		)mydelimiter")
-      // Here we bind the methods of the Unsqueeze_Op that wil want to access
+      // Here we bind the methods of the Unsqueeze_Op that will want to access
       .def("get_inputs_name", &Unsqueeze_Op::getInputsName)
       .def("get_outputs_name", &Unsqueeze_Op::getOutputsName)
       .def("axes", &Unsqueeze_Op::axes);
diff --git a/python_binding/utils/pybind_Log.cpp b/python_binding/utils/pybind_Log.cpp
index ca8d1f33086fb5093c76826e5a2f53df873badf5..aa42c6605217f63b6871d1a3475b9612097577cd 100644
--- a/python_binding/utils/pybind_Log.cpp
+++ b/python_binding/utils/pybind_Log.cpp
@@ -17,7 +17,7 @@ void init_Log(py::module& m){
           R"mydelimiter(
           Detailed messages for debugging purposes, providing information helpful
           for developers to trace and identify issues.
-          Detailed insights of what is appening in an operation, not useful for the
+          Detailed insights of what is happening in an operation, not useful for the
           end-user. The operation is performed nominally.
           Note: This level is disabled at compile time for Release, therefore
           inducing no runtime overhead for Release.
diff --git a/python_binding/utils/pybind_TensorUtils.cpp b/python_binding/utils/pybind_TensorUtils.cpp
index d82db0355ad641062ec89b1b331c74ccfde4c0b6..15fabdcb700ed5ca15d3d60952f55df488d41bc3 100644
--- a/python_binding/utils/pybind_TensorUtils.cpp
+++ b/python_binding/utils/pybind_TensorUtils.cpp
@@ -41,7 +41,7 @@ void addTensorUtilsFunction(py::module &m){
         :type t1: :py:class:`aidge_core.Tensor`
         :param t2: second tensor to test
         :type t2: :py:class:`aidge_core.Tensor`
-        :param relative: relative difference allowed (should be betwen 0 and 1)
+        :param relative: relative difference allowed (should be between 0 and 1)
         :type relative: float
         :param absolute: absolute error allowed (shoulmd be positive)
         :type absolute: float
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index e1f25e86827e81b26436876dce1b98fe0cda80b8..1354281933b69bb6e038587cc27ee0397d05c6f1 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -351,7 +351,7 @@ Aidge::IOIndex_t Aidge::GraphView::getNbDataInputs() const {
   for (const std::shared_ptr<Node> &inNode : inputNodes()) {
     // We cannot simply add inNode->nbDataInputs(), as input nodes may already
     // have some inputs connected within the GraphView, which would therefore not
-    // constitue inputs (from outside) for the GraphView!
+    // constitute inputs (from outside) for the GraphView!
     const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> inputNodeinputs =
         inNode->dataInputs();
 
@@ -433,7 +433,7 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
     // remove current Data connections and use dummy inputs to propagate dimensions
     // setInputs
     // Link every tensor to the right pointer
-    // following parent - children informations
+    // following parent - children information
     if (!dims.empty()){
       Log::debug("forwardDims(): setting graph input dims ({} dims provided).", dims.size());
 
@@ -535,7 +535,7 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                     Log::debug("Dimensions forwarded for node {} (of type {})",
                         nodePtr->name(), nodePtr->type());
 
-                    // Recompute everytime, even if it was already computed in a
+                    // Recompute every time, even if it was already computed in a
                     // previous call of forwardDims(), as the graph may have changed!
                     dimsForwarded.insert(nodePtr);
                     for (const auto& child : nodePtr->getChildren()) {
@@ -629,7 +629,7 @@ void Aidge::GraphView::setInputId(Aidge::IOIndex_t /*inID*/,
 }
 
 void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnableParam) {
-  AIDGE_ASSERT(node != nullptr, "Trying to add non-existant node!");
+  AIDGE_ASSERT(node != nullptr, "Trying to add non-existent node!");
 
   // first node to be added to the graph is the root node by default
   if (mRootNode == nullptr) {
@@ -821,7 +821,7 @@ bool Aidge::GraphView::add(std::set<std::shared_ptr<Node>> otherNodes, bool incl
     mRootNode = *noParentNodes.begin();
 
     if (noParentNodes.size() > 1) {
-      // If there is more than one, order unicity cannot be garanteed!
+      // If there is more than one, order unicity cannot be guaranteed!
       orderUnicity = false;
     }
 
@@ -924,7 +924,7 @@ void Aidge::GraphView::addChild(
   // assert input node is valid
   if (!toNode.first) {
     assert(toOtherView->inputNodes().size() == 1U &&
-           "If no intput node is provided, the other graph should have only "
+           "If no input node is provided, the other graph should have only "
            "one to make the choice explicit.");
     toNode.first = *(toOtherView->inputNodes().begin());
   } else {
@@ -1045,7 +1045,7 @@ void Aidge::GraphView::remove(std::shared_ptr<Node> nodePtr, bool includeLearnab
 
 
 bool Aidge::GraphView::swap(Node & /*node*/, Node & /*otherNode*/) {
-  fmt::print("Swap() not implementated yet. Return false.\n");
+  fmt::print("Swap() not implemented yet. Return false.\n");
   return false;
 }
 
@@ -1476,7 +1476,7 @@ void Aidge::GraphView::updateInputsOutputsDelete(std::shared_ptr<Node> deletedNo
 std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*cloneNode)(NodePtr)) const {
   std::shared_ptr<GraphView> newGraph = std::make_shared<GraphView>(mName);
 
-  // Map for old node -> new node correspondance
+  // Map for old node -> new node correspondence
   std::map<NodePtr, NodePtr> oldToNewNodes;
 
   for (const std::shared_ptr<Node> &node_ptr : mNodes) {
diff --git a/src/graphRegex/GraphFsmInterpreter.cpp b/src/graphRegex/GraphFsmInterpreter.cpp
index 18b768c6567e64caf6841ed4a339f13fd16f69d6..a2f07129c468c737da022de8a6d1b093cdd08e39 100644
--- a/src/graphRegex/GraphFsmInterpreter.cpp
+++ b/src/graphRegex/GraphFsmInterpreter.cpp
@@ -145,7 +145,7 @@ std::shared_ptr<FsmGraph> GraphFsmInterpreter::qomF(std::shared_ptr<FsmGraph> fs
                         edge = FsmEdgeFactory::make(valid,start,FsmEdgeTypes::REF,mNodesCondition, lexem.str());
                     }else{
                         /*
-                        the sequencial quantify case 
+                        the sequential quantify case 
                         no reference to common 
                         */
                         edge = FsmEdgeFactory::make(valid,start,FsmEdgeTypes::EMPTY,mNodesCondition,"");
@@ -165,7 +165,7 @@ std::shared_ptr<FsmGraph> GraphFsmInterpreter::qomF(std::shared_ptr<FsmGraph> fs
 
 std::shared_ptr<FsmGraph> GraphFsmInterpreter::qzmF(std::shared_ptr<FsmGraph> fsm){
         /*
-        qomf and a bypass empty start to valide 
+        qomf and a bypass empty start to valid 
         */
     fsm = qomF(fsm);
 
diff --git a/src/graphRegex/GraphLexer.cpp b/src/graphRegex/GraphLexer.cpp
index f504ad025940c88058ce5949259c464ae2cedfb6..05a23d02cdbfe072337ea2cc6ed92410e914257b 100644
--- a/src/graphRegex/GraphLexer.cpp
+++ b/src/graphRegex/GraphLexer.cpp
@@ -79,7 +79,7 @@ std::shared_ptr<ParsingToken<gRegexTokenTypes>> GraphLexer::getNextToken(void){
 
                 if(!std::regex_match(currentChars,keyRegex) && !std::regex_match(currentChars,cKeyRegex))
                 {
-                    currentChars.pop_back(); //the last char is the problemes
+                    currentChars.pop_back(); //the last char is the problem
                     break;
                 }
                 else if (std::regex_match(currentChars,cKeyRegex)){
@@ -89,7 +89,7 @@ std::shared_ptr<ParsingToken<gRegexTokenTypes>> GraphLexer::getNextToken(void){
                 if (mPosition < mRegularExpressions.length()) currentChars += mRegularExpressions[mPosition];
                 
             }
-            //we end the match 2 posibility 
+            //we end the match: 2 possibilities
             //we are at the end of the mConditionalExpressions and we need to ensure the match
             //we are not we can continu
             if (mPosition == mRegularExpressions.length()-1)
diff --git a/src/graphRegex/matchFsm/FsmEdge.cpp b/src/graphRegex/matchFsm/FsmEdge.cpp
index 638aad3bc3f5c94d5b20420ed8cc0799daa08cc0..170d5e69366d25ba19ea2f514cab6cd59b545ec0 100644
--- a/src/graphRegex/matchFsm/FsmEdge.cpp
+++ b/src/graphRegex/matchFsm/FsmEdge.cpp
@@ -141,7 +141,7 @@ FsmEdge::FsmEdge(std::shared_ptr<FsmNode>& source,std::shared_ptr<FsmNode>& dest
 {
     mNodeSource = source;
     mNodeDest   = dest;
-    // wen i make the edge I init the nodes
+    // when i make the edge I init the nodes
     // mNodeSource->addEdge(shared_from_this());
     // mNodeDest->addParent(mNodeSource);
 }
diff --git a/src/graphRegex/matchFsm/FsmGraph.cpp b/src/graphRegex/matchFsm/FsmGraph.cpp
index a56474e042cc44a68938b1d19e19a0c6841cb8cb..2ba11a4d26b933bdbfad5c91d127bfa2682473d4 100644
--- a/src/graphRegex/matchFsm/FsmGraph.cpp
+++ b/src/graphRegex/matchFsm/FsmGraph.cpp
@@ -33,7 +33,7 @@ FsmGraph::FsmGraph(const std::string query):mQuery(query){
         for(auto fsmContext : walks){
             allContextSee.push_back(fsmContext);
             //if we are in a valid st we save it
-            //it's one solution of the posible solution of the matching
+            //it's one of the possible solutions of the matching
             if(fsmContext->isOnValidState()){
                 //not save 2 time the same end point
                 if(!std::any_of(allValidContext.begin(), allValidContext.end(),
@@ -45,7 +45,7 @@ FsmGraph::FsmGraph(const std::string query):mQuery(query){
 
             }
 
-            //dont test 2 time a fsmContext
+            //don't test the same fsmContext twice
             std::vector<std::shared_ptr<FsmRunTimeContext>> tmpNextWalks = fsmContext->getActState()->test(fsmContext);
             for(auto PotentialFsmContext : tmpNextWalks){
 
@@ -135,15 +135,15 @@ void FsmGraph::mergeOneStartOneValid(const std::shared_ptr<FsmGraph> fsmGraph){
     if (startNodes.size() != 1 || validNodes.size() != 1){
 
         std::ostringstream errorMessage;
-        errorMessage <<"mergeOneStartOneValid  start size: " << startNodes.size() << " valide size : " << validNodes.size()
-        <<" can only merge FSM 1 start 1 valide";
+        errorMessage <<"mergeOneStartOneValid  start size: " << startNodes.size() << " valid size : " << validNodes.size()
+        <<" can only merge FSM 1 start 1 valid";
         throw std::runtime_error(errorMessage.str());
     }
 
     unionG(fsmGraph);
-    //for loop useless but for future merge it's coudl be used
+    //for loop currently unnecessary, but it could be used for future merges
     for(auto valid : validNodes){
-        valid->unValid();
+        valid->invalid();
         for(auto start : startNodes){
             start->unStart();
             _mergeNode(start,valid);
@@ -179,7 +179,7 @@ void FsmGraph::_mergeNode(std::shared_ptr<FsmNode> source,std::shared_ptr<FsmNod
     }
     nodes.clear();
 
-    //probagate source attribut
+    //propagate source attribute
     if(source->isValid()){
         dest->valid();
     }
diff --git a/src/graphRegex/matchFsm/FsmNode.cpp b/src/graphRegex/matchFsm/FsmNode.cpp
index 7bc4cf105b43a540bd0e9c686af35dd220611a09..6666d1a72a298f20bdae0eb1c51805e5ae133ba4 100644
--- a/src/graphRegex/matchFsm/FsmNode.cpp
+++ b/src/graphRegex/matchFsm/FsmNode.cpp
@@ -103,7 +103,7 @@ bool FsmNode::isValid(void){
 bool FsmNode::isStart(void){
     return mIsAStart;
 }
-void FsmNode::unValid(void){
+void FsmNode::invalid(void){
     mIsAValid =false;
 }
 void FsmNode::valid(void){
diff --git a/src/graphRegex/matchFsm/FsmRunTimeContext.cpp b/src/graphRegex/matchFsm/FsmRunTimeContext.cpp
index 7a09908e5629e299b6b264fbfaac97bdaf7fa316..89e7faf205ef515049de415a5f057db8a13105e9 100644
--- a/src/graphRegex/matchFsm/FsmRunTimeContext.cpp
+++ b/src/graphRegex/matchFsm/FsmRunTimeContext.cpp
@@ -74,7 +74,7 @@ bool FsmRunTimeContext::isAlreadyValid(NodePtr node){
 bool FsmRunTimeContext::areCompatible(std::shared_ptr<FsmRunTimeContext> fsmContext){
     /*
     see if 2 context can be merge
-    it need to have different  mValidNodes exept for common
+    it need to have different  mValidNodes except for common
     and the same idx for the common
     */
 
@@ -192,9 +192,9 @@ std::set<NodePtr> FsmRunTimeContext::getValidNodes(void){
 
 std::set<NodePtr> FsmRunTimeContext::getValidNodesNoCommon(void){
     std::set<NodePtr> differenceSet;
-    std::set<NodePtr> valide = getValidNodes();
+    std::set<NodePtr> valid = getValidNodes();
     std::set<NodePtr> common = getCommonNodes();
-    std::set_difference(valide.begin(), valide.end(), common.begin(), common.end(),std::inserter(differenceSet, differenceSet.end()));
+    std::set_difference(valid.begin(), valid.end(), common.begin(), common.end(),std::inserter(differenceSet, differenceSet.end()));
     return differenceSet;
 }
 
diff --git a/src/nodeTester/ConditionalInterpreter.cpp b/src/nodeTester/ConditionalInterpreter.cpp
index f40e62305334f740057f88ef21cdab749d64bd99..5d10762d93d2bc0e92bf9d15bb24255bb7e51768 100644
--- a/src/nodeTester/ConditionalInterpreter.cpp
+++ b/src/nodeTester/ConditionalInterpreter.cpp
@@ -8,11 +8,11 @@ using namespace Aidge;
 //ConditionalRegisterFunction
 ///////////////////////////////
 
-     std::shared_ptr<ConditionalData> ConditionalRegisterFunction::run(const std::string key,std::vector< std::shared_ptr<ConditionalData>> & datas){
+     std::shared_ptr<ConditionalData> ConditionalRegisterFunction::run(const std::string key,std::vector< std::shared_ptr<ConditionalData>> & data){
 
         auto lambdaIt = mWlambda.find(key);
         if (lambdaIt != mWlambda.end()) {
-            return lambdaIt->second(datas);
+            return lambdaIt->second(data);
         }else {
             throw std::runtime_error("can not run Lambda due to invalid key: " + key);
         }
@@ -174,7 +174,7 @@ using namespace Aidge;
                         case ConditionalTokenTypes::RPAREN:
                         case ConditionalTokenTypes::STOP:
                         default:
-                            throw std::runtime_error("NODE TYPE NOT SUPORTED IN ConditionalInterpreter");
+                            throw std::runtime_error("NODE TYPE NOT SUPPORTED IN ConditionalInterpreter");
                     }
                 }catch(const std::exception& e){
                     std::ostringstream errorMessage;
@@ -188,7 +188,7 @@ using namespace Aidge;
 
 
     //////////////////////
-    //value convertor
+    //value converter
     /////////////////////
 
 
diff --git a/src/nodeTester/ConditionalLexer.cpp b/src/nodeTester/ConditionalLexer.cpp
index e70772fc1a5d6136fb56f5981d73bf6cb0622991..9cc480ab29e84a775d9e275fe6ba51dc11e6ea14 100644
--- a/src/nodeTester/ConditionalLexer.cpp
+++ b/src/nodeTester/ConditionalLexer.cpp
@@ -28,7 +28,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
             mPosition++;
             continue;
         }
-        //performe tokenisation, find a regex and make a new token
+        //perform tokenisation, find a regex and make a new token
         
         if (std::regex_match(currentChars,std::regex("\\&\\&")))// the AND TOKEN 
         {
@@ -86,7 +86,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
         //non const lent token
         /////
 
-        //LAMBDA, KEY , bool //the fuction TAG 
+        //LAMBDA, KEY , bool //the function TAG 
         else if (std::regex_match(currentChars,std::regex("[A-Za-z_]")))// the KEY TOKEN (a char next )
         {   
             //read all the key 
@@ -97,7 +97,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
             while ( mPosition < mConditionalExpressions.length()) {
                 if(!std::regex_match(currentChars,keyRegex) && !std::regex_match(currentChars,LambdaRegex))
                 {
-                    currentChars.pop_back(); //the last char is the problemes
+                    currentChars.pop_back(); //the last char is the problem
                     break;
                 }
                 else if (std::regex_match(currentChars,LambdaRegex)){
@@ -107,7 +107,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
                 if (mPosition < mConditionalExpressions.length()) currentChars += mConditionalExpressions[mPosition];
                 //currentChars += mConditionalExpressions[mPosition];
             }
-            //we end the match 2 posibility 
+            //we end the match: 2 possibilities
             //we are at the end of the mConditionalExpressions and we need to ensure the match
             //we are not we can continu
             if (mPosition == mConditionalExpressions.length()-1)
@@ -116,7 +116,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
                 {
                     throw badTokenError(currentChars,mPosition);
                 }
-                //mPosition++; // we stop all by going pos > lengt
+                //mPosition++; // we stop all by going pos > length
             }
 
 
@@ -153,7 +153,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
                 if (mPosition < mConditionalExpressions.length()) currentChars += mConditionalExpressions[mPosition];
                 //currentChars += mConditionalExpressions[mPosition];
             }
-            //we end the match 2 posibility 
+            //we end the match: 2 possibilities
             //we are at the end of the mConditionalExpressions and we need to ensure the match
             //we are not we can continu
             if (mPosition == mConditionalExpressions.length()-1)
@@ -189,7 +189,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
                 if (!std::regex_match(currentChars,strRegex)){
                      throw badTokenError(currentChars,mPosition);
                 }
-                //mPosition++; // we stop all by going pos > lengt
+                //mPosition++; // we stop all by going pos > length
             }
 
             mPosition++; // go after the last " 
diff --git a/src/nodeTester/ConditionalParser.cpp b/src/nodeTester/ConditionalParser.cpp
index ba40c561375e0c09eb86009d447a782ab99d5d0b..5cf6f8617612b09c1a61e694a56dc6ed4d0f2b39 100644
--- a/src/nodeTester/ConditionalParser.cpp
+++ b/src/nodeTester/ConditionalParser.cpp
@@ -76,7 +76,7 @@ std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::Conditional
         return constructAstLambda();
     }
 
-   throw std::runtime_error("ConditionalParser unknow val type "+ token->rep().str() + "\n" + mLexer.rep());
+   throw std::runtime_error("ConditionalParser unknown val type "+ token->rep().str() + "\n" + mLexer.rep());
 
 }
 
@@ -169,7 +169,7 @@ std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::Conditional
 
         std::shared_ptr<AstNode<ConditionalTokenTypes>> right = constructAstExpr(prec);
 
-        //i'm not sur what append to newNode
+        //i'm not sure what append to newNode
         //std::shared_ptr<AstNode<ConditionalTokenTypes>> newNode = std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{left,constructAstCmpr()});
         std::shared_ptr<AstNode<ConditionalTokenTypes>> newNode = std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{left,right});
         left = newNode;
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index 29a9ee6252a0c2baa6e07bc56e60650685db6bdd..6de0854e8cdc166a3f938a166348db481956e792 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -72,7 +72,7 @@ bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
         const DimSize_t roi = end - start + 1;
 
         AIDGE_ASSERT(start < nbDims && end < nbDims, "'start' and 'end' must be < {}", nbDims);
-        AIDGE_ASSERT(roi> 1, "Unvalid ROI for Shape");
+        AIDGE_ASSERT(roi> 1, "Invalid ROI for Shape");
 
         mOutputs[0]->resize({roi});
         return true;
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index e3ed13588d8c2b5ddde91d37fc926d675f0666a3..2191f14a150088dfa1d369d2ef31051e5ab16326 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -43,7 +43,7 @@ void Aidge::Split_OpImpl::forward() {
         {
             // Compute chunk position in input tensor
             DimSize_t idx = j * stride_post * dims[axis] + chunkIdxOnAxis;
-            // Copy chunk in ouput
+            // Copy chunk in output
             op.getOutput(i)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(idx),
                                             splits[i] * stride_post, offset);
             offset += splits[i] * stride_post;
@@ -124,7 +124,7 @@ bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
         // Fill Split attr if empty
         if(this->split().empty()) {
             // In case the input Split is not provided, divide the dimension of Axis into equal slices
-            AIDGE_ASSERT(dimToSplit > nbOutput, "Split_Op: Output number {} musn't be bigger than dimension {}.", nbOutput, dimToSplit);
+            AIDGE_ASSERT(dimToSplit > nbOutput, "Split_Op: Output number {} mustn't be bigger than dimension {}.", nbOutput, dimToSplit);
             DimSize_t baseSliceSize = dimToSplit / nbOutput;
 
             DimSize_t remainder = dimToSplit % nbOutput;
diff --git a/src/operator/Stack.cpp b/src/operator/Stack.cpp
index efe6296a351f69ef3a11d4e1bc04bd0b52d46a06..4ca7cc9831c091a8ea79051115decd489a4a03be 100644
--- a/src/operator/Stack.cpp
+++ b/src/operator/Stack.cpp
@@ -31,7 +31,7 @@ Elts_t StackProdConso::getRequiredMemory(
 
     const StackOp &op = dynamic_cast<const StackOp &>(mOp);
     // The produced data after one forward pass is simply the input size,
-    // we do not produce the whole output tensor everytime.
+    // we do not produce the whole output tensor every time.
     if (op.forwardStep() <= op.maxElements()) {
         return Elts_t::DataElts(op.getInput(inputIdx)->size());
     } else {
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 4c4de25282c487d023f9c184b015ac332e716b7b..50c8f561c1732d6f7f37ae5b8d6f03c4e135939c 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -37,7 +37,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
         const std::shared_ptr<GraphView>  metaOpGraph = metaOp -> getMicroGraph();
         const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> outputNodes = metaOpGraph -> getOrderedOutputs();
         if (outputNodes.size() != 1) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Bad MetaOperator argument for fuseBatchNorm recipie.");
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Bad MetaOperator argument for fuseBatchNorm recipe.");
         }
         convNode = outputNodes[0].first;
     }
@@ -99,7 +99,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
         Log::notice("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?\n");
     }
 
-    // Add bias if it is non existant, as there will be a bias after the fuse
+    // Add bias if it is non-existent, as there will be a bias after the fuse
     if (!convOp->getInput(2)) {
         if (metaNode) {
             // Conv is inside a meta-operator, we add bias outside it
diff --git a/src/recipes/MatMulToFC.cpp b/src/recipes/MatMulToFC.cpp
index 9b5addd3bb971b3f61980a582d4cce6435c57219..8d902c680b8fa0d30a873b6f355734ce19d608f5 100644
--- a/src/recipes/MatMulToFC.cpp
+++ b/src/recipes/MatMulToFC.cpp
@@ -34,7 +34,7 @@ void Aidge::matMulToFC(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
 
 
     // Step 1 : Create FC
-    // Fetch the output dimension throught the bias size
+    // Fetch the output dimension through the bias size
     std::shared_ptr<Node> bias = nullptr;
     if (addNode) {
         if (addNode->getParent(0) == matmulNode) {
@@ -76,7 +76,7 @@ void Aidge::matMulToFC(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
     }
     AIDGE_ASSERT(weight != nullptr, "Could not deduce weight input for MatMul operator.");
 
-    // Instanciate FC
+    // Instantiate FC
     std::string fcName = matmulNode->name();
     if (addNode && !addNode->name().empty()) {
         fcName += "_" + addNode->name();
diff --git a/src/scheduler/MemoryManager.cpp b/src/scheduler/MemoryManager.cpp
index 6fe0d1f0745a464b8fd61bf634d7105b9d22faf8..ba805f919a607e0b2ae3272d173aa11360548fa7 100644
--- a/src/scheduler/MemoryManager.cpp
+++ b/src/scheduler/MemoryManager.cpp
@@ -898,7 +898,7 @@ Aidge::MemoryManager::getMaxHole(std::shared_ptr<MemorySpace> memSpace) const
                     std::make_pair((*itPlane).allocated, holeSize));
 
                 if (!newInsert) {
-                    // Another plane exists at the same time, one must substract
+                    // Another plane exists at the same time, one must subtract
                     // the size of this other plane from the hole size
                     (*it).second = std::max(0, static_cast<int>((*it).second)
                         - static_cast<int>((*itPlane).getContiguousSize())
diff --git a/src/scheduler/ParallelScheduler.cpp b/src/scheduler/ParallelScheduler.cpp
index 2b9a1f5b62741d5f08dfc3e5aa45b1102d54b850..2a44dd49f961bdcdf965a33d2ffe91f3ed8ae352 100644
--- a/src/scheduler/ParallelScheduler.cpp
+++ b/src/scheduler/ParallelScheduler.cpp
@@ -46,7 +46,7 @@ void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std::
 
     const auto namePtrTable = mGraphView->getRankedNodesName("{0} ({1}#{3})");
 
-    // Sort static scheduling, the order will be the prefered threads scheduling
+    // Sort static scheduling, the order will be the preferred threads scheduling
     // order for non critical nodes
     std::deque<StaticSchedulingElement*> staticSchedule(mStaticSchedule.at(mStaticScheduleStep).begin(), mStaticSchedule.at(mStaticScheduleStep).end());
     std::stable_sort(staticSchedule.begin(), staticSchedule.end(),
diff --git a/unit_tests/backend/Test_TensorImpl.cpp b/unit_tests/backend/Test_TensorImpl.cpp
index ceb6772d01d4ee84524896fead96abcb445f84ff..2f6ef519935295dce5edd0d486c9f5ba6e307331 100644
--- a/unit_tests/backend/Test_TensorImpl.cpp
+++ b/unit_tests/backend/Test_TensorImpl.cpp
@@ -34,7 +34,7 @@ TEST_CASE("[backend/cpu/data] Tensor", "[TensorImpl]") {
 }
 
 TEST_CASE("Tensor fill", "[TensorImpl][fill]") {
-  SECTION("Instantiate batches independantly") {
+  SECTION("Instantiate batches independently") {
     // initialization with 0s
     std::shared_ptr<Tensor> concatenatedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{});
     //concatenatedTensor->print();
diff --git a/unit_tests/graph/Test_Connector.cpp b/unit_tests/graph/Test_Connector.cpp
index 79acce9281039f9f3c67b7235d8999b6c7173685..3fe2318c81a95528e46900e82fa7e4b69f5e28f5 100644
--- a/unit_tests/graph/Test_Connector.cpp
+++ b/unit_tests/graph/Test_Connector.cpp
@@ -184,7 +184,7 @@ TEST_CASE("Connector Mini-graph", "[Connector]") {
     // g->save("TestGraph");
 }
 
-TEST_CASE("Structural descrition - Sequential", "[GraphView]") {
+TEST_CASE("Structural description - Sequential", "[GraphView]") {
     // SECTION("Empty Sequence") {
     //     std::shared_ptr<GraphView> g1 = Sequential(); // Not supported
     //     REQUIRE(g1->getNodes() == std::set<std::shared_ptr<Node>>());
@@ -256,7 +256,7 @@ TEST_CASE("Structural description - Parallel", "[GraphView]") {
     }
 }
 
-TEST_CASE("Strucutral Description - Complex Graph", "[GraphView]") {
+TEST_CASE("Structural Description - Complex Graph", "[GraphView]") {
     std::shared_ptr<Node> firstLayer = GenericOperator("first", 1, 0, 1);
     auto g = Sequential({firstLayer,
                     GenericOperator("l2", 1, 0, 1),
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index 2fa06cf23b3b681211208a3e5bbea9226f0930b8..a7d02cd2fc1f3782046f3e8a9e7d7ca00b2ec5a7 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -195,7 +195,7 @@ TEST_CASE("remove", "[GraphView][remove]") {
             // g2->save("./remove2");
 
             REQUIRE(nodePtrTo(g1->getNodes(), nodePtrToName) == nodePtrTo(g2->getNodes(), nodePtrToName));
-            // Order not garanteed, because when a node is removed, it can create new GraphView inputs/outputs
+            // Order not guaranteed, because when a node is removed, it can create new GraphView inputs/outputs
             // Their order thus depends on the deletion order!
             //REQUIRE(nodePtrTo(g1->getOrderedInputs(), nodePtrToName) == nodePtrTo(g2->getOrderedInputs(), nodePtrToName));
             //REQUIRE(nodePtrTo(g1->getOrderedOutputs(), nodePtrToName) == nodePtrTo(g2->getOrderedOutputs(), nodePtrToName));
@@ -248,7 +248,7 @@ TEST_CASE("[core/graph] GraphView(add)", "[GraphView][add]") {
 
     SECTION("Several Nodes") {
         std::shared_ptr<GraphView> g = std::make_shared<GraphView>("TestGraph");
-        // should automaticaly add parents for learnable parameters
+        // should automatically add parents for learnable parameters
         std::shared_ptr<Node> GOp1 = GenericOperator("Fictive", 0, 1, 1, "Gop1");
         std::shared_ptr<Node> GOp1parent = GenericOperator("Fictive", 0, 0, 1, "Gop1parent");
         GOp1parent->addChild(GOp1, 0, 0);
@@ -257,7 +257,7 @@ TEST_CASE("[core/graph] GraphView(add)", "[GraphView][add]") {
         REQUIRE(nodePtrTo(g->getOrderedInputs(), nodePtrToName) == std::vector<std::pair<std::string, IOIndex_t>>({}));
         REQUIRE(nodePtrTo(g->getOrderedOutputs(), nodePtrToName) == std::vector<std::pair<std::string, IOIndex_t>>({{"Gop1", 0}}));
 
-        // there should be no deplicates
+        // there should be no duplicates
         g->add(GOp1);
         REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({GOp1, GOp1parent}));
         REQUIRE(nodePtrTo(g->getOrderedInputs(), nodePtrToName) == std::vector<std::pair<std::string, IOIndex_t>>({}));
@@ -396,7 +396,7 @@ TEST_CASE("[core/graph] GraphView(save)") {
 }
 
 TEST_CASE("[core/graph] GraphView(resetConnections)") {
-    SECTION("disconnect data iput") {
+    SECTION("disconnect data input") {
         std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
         std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 2, 1, "c1");
         std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
diff --git a/unit_tests/graphRegex/Test_FsmMatch.cpp b/unit_tests/graphRegex/Test_FsmMatch.cpp
index 008251feaac9d2dbe21aae3dfc7ebaa69e828ae7..6229ec62af96802ebdfe871e6058ab1791cf80fd 100644
--- a/unit_tests/graphRegex/Test_FsmMatch.cpp
+++ b/unit_tests/graphRegex/Test_FsmMatch.cpp
@@ -52,7 +52,7 @@ TEST_CASE("FsmMatch") {
     }
 
 
-    SECTION("2 branche graph"){
+    SECTION("2 branches graph"){
 
         std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
         std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
diff --git a/unit_tests/graphRegex/Test_GraphLexer.cpp b/unit_tests/graphRegex/Test_GraphLexer.cpp
index 1b8cc8e018546ebfe3f84202d9404db27b17449b..615c052041e40c2941054b7ddcc19229c0994f0d 100644
--- a/unit_tests/graphRegex/Test_GraphLexer.cpp
+++ b/unit_tests/graphRegex/Test_GraphLexer.cpp
@@ -104,7 +104,7 @@ TEST_CASE("GraphRegex", "Lexer") {
 
 
             std::ostringstream errorMessage;
-            errorMessage << "\n we whant :"<< lexemToFind << "\n we get : "<< token->getLexeme() <<"\n"<< "on \n" << testString << " :\n "  ;
+            errorMessage << "\n we want :"<< lexemToFind << "\n we get : "<< token->getLexeme() <<"\n"<< "on \n" << testString << " :\n "  ;
 
             CAPTURE(errorMessage.str());
             REQUIRE(token->getLexeme() == lexemToFind);
diff --git a/unit_tests/graphRegex/Test_graphRegexAST.cpp b/unit_tests/graphRegex/Test_graphRegexAST.cpp
index 1cdb0bc1934983a26ab742bfe8879455077219cc..f9c7a7c5dc8ab103b3b566f2df77883b0b1966f1 100644
--- a/unit_tests/graphRegex/Test_graphRegexAST.cpp
+++ b/unit_tests/graphRegex/Test_graphRegexAST.cpp
@@ -55,7 +55,7 @@ TEST_CASE("GraphStrInterpreter") {
         for (const std::string& test : tests) {
             std::shared_ptr<GraphStrInterpreter>  strGenerator = std::make_shared<GraphStrInterpreter>(test);
             std::string astString = strGenerator->interpret();
-            //supress space in the test becase erase in the AST
+            //suppress space in the test because erase in the AST
             std::string testNoS = test;
             testNoS.erase(std::remove_if(testNoS.begin(), testNoS.end(), ::isspace), testNoS.end());
             //if the last char is ; (SEP) it will not in the AST and it's not a bug erase it
diff --git a/unit_tests/nodeTester/Test_ConditionalLexer.cpp b/unit_tests/nodeTester/Test_ConditionalLexer.cpp
index a937c27227dde4fa03ed7733df9e9552c3c1ac7b..d79824e2e53d0c9621fdc6847ebca00faba03af4 100644
--- a/unit_tests/nodeTester/Test_ConditionalLexer.cpp
+++ b/unit_tests/nodeTester/Test_ConditionalLexer.cpp
@@ -130,7 +130,7 @@ TEST_CASE("nodeTester", "Lexer") {
 
 
             std::ostringstream errorMessage;
-            errorMessage << "\n we whant :"<< lexemToFind << "\n we get : "<< token->getLexeme() <<"\n"<< "on \n" << testString << " :\n "  ;
+            errorMessage << "\n we want :"<< lexemToFind << "\n we get : "<< token->getLexeme() <<"\n"<< "on \n" << testString << " :\n "  ;
 
             CAPTURE(errorMessage.str());
             REQUIRE(token->getLexeme() == lexemToFind);
diff --git a/unit_tests/operator/Test_GenericOperator.cpp b/unit_tests/operator/Test_GenericOperator.cpp
index 41bad69749fd82f892c6faa625739d0493396c73..c82c55f165278c985dabd771cd6481a4839ada2c 100644
--- a/unit_tests/operator/Test_GenericOperator.cpp
+++ b/unit_tests/operator/Test_GenericOperator.cpp
@@ -82,7 +82,7 @@ TEST_CASE("[core/operator] GenericOp(type check)", "[Operator]") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         Testop.addAttr<long>("LongAttr", 3);
 
-        // This line should raise a failled assert
+        // This line should raise a failed assert
         REQUIRE_THROWS(Testop.getAttr<int>("LongAttribute"));
     }
 }
diff --git a/unit_tests/operator/Test_Squeeze_Op.cpp b/unit_tests/operator/Test_Squeeze_Op.cpp
index 471a1dcd1e45384b2c65da75ddee9d3ec039dc34..660e970dd65bb7c4b9a52b0ccb62350ca355d243 100644
--- a/unit_tests/operator/Test_Squeeze_Op.cpp
+++ b/unit_tests/operator/Test_Squeeze_Op.cpp
@@ -156,7 +156,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
     }
     SECTION("axes is given via tensor") {
       SECTION("tensor is empty") {
-        // arguments here should be overriden by axes_T values
+        // arguments here should be overridden by axes_T values
         std::shared_ptr<Node> myUnsqueeze =
             Squeeze(std::vector<std::int8_t>({0, 4}));
         auto op = std::static_pointer_cast<OperatorTensor>(
@@ -177,7 +177,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
         CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({3, 4, 5}));
       }
       SECTION("tensor not empty") {
-        // arguments here should be overriden by axes_T values
+        // arguments here should be overridden by axes_T values
         std::shared_ptr<Node> myUnsqueeze =
             Squeeze(std::vector<std::int8_t>({3, 1}));
         auto op = std::static_pointer_cast<OperatorTensor>(
diff --git a/unit_tests/operator/Test_Unsqueeze_Op.cpp b/unit_tests/operator/Test_Unsqueeze_Op.cpp
index 79f5b89b1c08f409b214a9439431c2d2a51ddbd2..a436ab5a54e2f66fe87bfc28157d691cf6548dd4 100644
--- a/unit_tests/operator/Test_Unsqueeze_Op.cpp
+++ b/unit_tests/operator/Test_Unsqueeze_Op.cpp
@@ -177,7 +177,7 @@ TEST_CASE("[core/operator] Unsqueeze(forwardDims)",
       }
     }
     SECTION("axes is given via tensor") {
-        // arguments here should be overriden by axes_T values
+        // arguments here should be overridden by axes_T values
         std::shared_ptr<Node> myUnsqueeze =
             Unsqueeze(std::vector<std::int8_t>({0, 4}));
         auto op = std::static_pointer_cast<OperatorTensor>(
diff --git a/unit_tests/recipes/Test_ExplicitTranspose.cpp b/unit_tests/recipes/Test_ExplicitTranspose.cpp
index 0c0a46710d69606508a22e7b01dac708db9b8f34..bb89ba7952347a779e6979e7cf3c4f1bd68abf9b 100644
--- a/unit_tests/recipes/Test_ExplicitTranspose.cpp
+++ b/unit_tests/recipes/Test_ExplicitTranspose.cpp
@@ -41,11 +41,11 @@ TEST_CASE("[ExplicitTranspose] conv") {
     g1->forwardDims();
     explicitTranspose(g1);
 
-    // Check that Tranpose were inserted
+    // Check that Transpose were inserted
     g1->save("explicitTranspose_after");
     REQUIRE(g1->getNodes().size() == 12);
 
-    // Check that Tranpose are removed
+    // Check that Transpose are removed
     conv2->getOperator()->setDataFormat(DataFormat::NCHW);
     explicitTranspose(g1);
 
diff --git a/unit_tests/recipes/Test_removeConstantOfShape.cpp b/unit_tests/recipes/Test_removeConstantOfShape.cpp
index 247149a0fdb1087f14ac17d125659d677ccfb506..b912efc640fc901f694afeda256be91d51010419 100644
--- a/unit_tests/recipes/Test_removeConstantOfShape.cpp
+++ b/unit_tests/recipes/Test_removeConstantOfShape.cpp
@@ -32,8 +32,8 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/recipies] removeConstantOfShape",
-          "[ConstantOfShape][removeConstantOfShape][recipies]") {
+TEST_CASE("[cpu/recipes] removeConstantOfShape",
+          "[ConstantOfShape][removeConstantOfShape][recipes]") {
   auto input_T = std::make_shared<Tensor>(Array1D<int64_t, 4>({1, 1, 3, 3}));
 
   auto model = std::make_shared<GraphView>();
diff --git a/unit_tests/recipes/Test_removeFlatten.cpp b/unit_tests/recipes/Test_removeFlatten.cpp
index c3b4c08d98115c9f081bbbf8cb677114b66c545a..1b5e2783813da890b1e79744582f54bb5c932772 100644
--- a/unit_tests/recipes/Test_removeFlatten.cpp
+++ b/unit_tests/recipes/Test_removeFlatten.cpp
@@ -24,7 +24,7 @@
 
 namespace Aidge {
 
-TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
+TEST_CASE("[cpu/recipes] RemoveFlatten", "[RemoveFlatten][recipes]") {
   std::shared_ptr<Node> flatten =
       GenericOperator("Flatten", 1, 0, 1, "myFlatten");
   std::shared_ptr<Node> fc0 = FC(10, 10, false, "FC_1");
diff --git a/unit_tests/utils/Test_StaticAttributes.cpp b/unit_tests/utils/Test_StaticAttributes.cpp
index 36c2e0454b415e1cb25cc3581016530a372b9e65..b23f8683e0ae3a2de805770556fefdd66722460d 100644
--- a/unit_tests/utils/Test_StaticAttributes.cpp
+++ b/unit_tests/utils/Test_StaticAttributes.cpp
@@ -43,6 +43,6 @@ TEST_CASE("[core/attributes] StaticAttribute") {
             attr<TestAttr::d>({true, false, true}));
 
         REQUIRE(attrs.getAttr<int>("a") == 42);
-        REQUIRE_THROWS(attrs.getAttr<int>("inexistant"));
+        REQUIRE_THROWS(attrs.getAttr<int>("inexistent"));
     }
 }