diff --git a/.codespellrc b/.codespellrc
new file mode 100644
index 0000000000000000000000000000000000000000..ab384a7b1d8bd8cf0014d214890dce60c797b461
--- /dev/null
+++ b/.codespellrc
@@ -0,0 +1,12 @@
+[codespell]
+builtin = clear,rare,en-GB_to_en-US,names,informal,code
+check-filenames =
+check-hidden =
+skip = */.git,*/build,*/prefix,*/aidge_core,*/aidge_core.egg-info,*/cmake,.clang-format
+quiet-level = 2
+# childs : used a lot and understandable
+# dOut,inH,ro : used for testing
+# deque : cpp data struct
+# inout : commented code variable
+# nd : commented code
+ignore-words-list = childs,dOut,inH,ro,deque,inout,stdio,nd
diff --git a/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.ps1 b/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.ps1
index c2715ea5550432838d3cc8692e97204b278d2c85..eb5658df3b96e3bc234e53d815bde4cecb4ed937 100644
--- a/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.ps1
+++ b/.gitlab/ci/cibuildwheel_build_deps_before_build_wheel.ps1
@@ -4,7 +4,7 @@ $ErrorActionPreference = "Stop"
 $AIDGE_DEPENDENCIES = $env:AIDGE_DEPENDENCIES -split ' '
 Write-Host "Aidge dependencies : $AIDGE_DEPENDENCIES"
 if ( $($AIDGE_DEPENDENCIES.Length) -eq 0) {
-        Write-Host "- No dependencies provided for current repsitory"
+        Write-Host "- No dependencies provided for current repository"
         New-Item -ItemType Directory -Force -Path ".\build" | Out-Null
         Remove-Item -Path ".\build\*" -Recurse -Force
     } else {
diff --git a/MANIFEST.in b/MANIFEST.in
index ae5b7c7c2e07eef97ef72bdb79cca94f8124981b..ed911dd75b59b65b8bfa023584aae8585de6325b 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README.md LICENCE
+include README.md LICENSE
 recursive-include aidge_core *.py 
 recursive-exclude aidge_core/unit_tests *.py
 
diff --git a/README.md b/README.md
index fe8fd5a4252054c730be8e948d0d2e415c009d47..5fa6b938c6e333ac2c2292fc931749b3fd953b4f 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ You can find here the C++ code of the Core library of Aidge.
 
 ## Pip installation
 
-To install aidge_core using pip, run the following command in your python environnement :
+To install aidge_core using pip, run the following command in your python environment :
 ``` bash
 pip install . -v
 ```
@@ -23,7 +23,7 @@ pip install . -v
 
 To setup aidge_core using pip in development (or editable mode), use the `--no-build-isolation -e` options to pip.
 
-For instance run the following command in your python environnement for a typical setup :
+For instance run the following command in your python environment for a typical setup :
 ``` bash
 export AIDGE_BUILD_TEST=ON              # enable C++ unit tests
 export AIDGE_PYTHON_BUILD_TYPE=         # default flags (no debug info but fastest build time)
@@ -85,7 +85,7 @@ make all install
 |   Option   | Value type | Description |
 |:----------:|:----------:|:-----------:|
 | *-DCMAKE_INSTALL_PREFIX:PATH* | ``str``  | Path to the install folder |
-| *-DCMAKE_BUILD_TYPE*          | ``str``  | If ``Debug``, compile in debug mode, ``Release`` compile with highest optimisations or "" (empty) , default= ``Release`` |
+| *-DCMAKE_BUILD_TYPE*          | ``str``  | If ``Debug``, compile in debug mode, ``Release`` compile with highest optimizations or "" (empty) , default= ``Release`` |
 | *-DWERROR*                    | ``bool`` | If ``ON`` show warning as error during compilation phase, default=``OFF`` |
 | *-DTEST*                      | ``bool`` | If ``ON`` build C++ unit tests, default=``ON`` |
 | *-DPYBIND*                    | ``bool`` | If ``ON`` activate python binding, default=``OFF`` |
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 649898dd130d5811f65f65af87bc117d3502647c..68e2a57b498551f6600d6b5720919d03b9bf037c 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -14,177 +14,73 @@
 
 #include <string>
 #include <vector>
-#include <functional>
 
 #include "aidge/utils/Types.h"
-#include "aidge/utils/DynamicAttributes.hpp"
-#include "aidge/data/Data.hpp"
 #include "aidge/data/Elts.hpp"
-#include "aidge/scheduler/ProdConso.hpp"
 
 namespace Aidge {
-class Node;
 class Operator;
 
-/**
- * @brief ImplSpec stores the requirements or the specifications of an implementation.
- *
- */
-struct ImplSpec {
-    struct IOSpec {
-        IOSpec(DataType type_, DataFormat format_ = DataFormat::Any, const std::vector<std::pair<int, int>>& dims_ = {}):
-            type(type_),
-            format(format_),
-            dims(dims_)
-        {}
-
-        DataType type;
-        DataFormat format;
-        std::vector<std::pair<int, int>> dims;
-    };
-
-    ImplSpec(const DynamicAttributes& attrs_ = DynamicAttributes());
-    ImplSpec(const IOSpec& io, const DynamicAttributes& attrs_ = DynamicAttributes());
-    ImplSpec(const IOSpec& i, const IOSpec& o, const DynamicAttributes& attrs_ = DynamicAttributes());
-    ImplSpec(const std::vector<IOSpec>& i, const std::vector<IOSpec>& o, const DynamicAttributes& attrs_ = DynamicAttributes());
-    ImplSpec(const Aidge::ImplSpec&);
-    ~ImplSpec() noexcept;
-
-    std::vector<IOSpec> inputs;
-    std::vector<IOSpec> outputs;
-    DynamicAttributes attrs;
-};
-
-inline bool operator==(const ImplSpec::IOSpec& lhs, const ImplSpec::IOSpec& rhs) {
-    return (lhs.type == rhs.type)
-        && (lhs.format == rhs.format)
-        && (lhs.dims == rhs.dims);
-}
-
-inline bool operator<(const ImplSpec::IOSpec& lhs, const ImplSpec::IOSpec& rhs) {
-    return (lhs.type < rhs.type)
-        || (lhs.type == rhs.type && lhs.format < rhs.format)
-        || (lhs.type == rhs.type && lhs.format == rhs.format && lhs.dims < rhs.dims);
-}
-
-inline bool operator<(const ImplSpec& lhs, const ImplSpec& rhs) {
-    return (lhs.inputs < rhs.inputs)
-        || (lhs.inputs == rhs.inputs && lhs.outputs < rhs.outputs)
-        || (lhs.inputs == rhs.inputs && lhs.outputs == rhs.outputs && lhs.attrs < rhs.attrs);
-}
-
-
-inline bool operator==(const ImplSpec& lhs, const ImplSpec& rhs) {
-    return !(lhs < rhs) && !(rhs < lhs);
-}
-
-/**
- * @brief Impl stores the details of a specific implementation.
- * It is associated to a ImplSpec in a registry.
- *
- */
-template <class FwdFunc, class BwdFunc>
-struct Impl {
-    Impl(std::function<std::unique_ptr<ProdConso>(const Operator&)> prodConso_,
-      std::function<FwdFunc> forward_,
-      std::function<BwdFunc> backward_ = nullptr):
-        prodConso(prodConso_), forward(forward_), backward(backward_) {}
-
-    std::function<std::unique_ptr<ProdConso>(const Operator&)> prodConso;
-    std::function<FwdFunc> forward;
-    std::function<BwdFunc> backward;
-};
-
 class OperatorImpl {
 public:
     OperatorImpl(const Operator& op, const std::string& backend = "");
     virtual void forward();
     virtual void backward();
-    virtual std::shared_ptr<ProdConso> prodConso();
 
     const std::string& backend() const noexcept {
         return mBackend;
     }
+    /**
+     * @brief Minimum amount of data from a specific input required by the
+     * implementation to be run.
+     *
+     * @param inputIdx Index of the input analyzed.
+     * @return std::size_t
+     */
+    virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
 
-    const Operator& getOperator() const noexcept {
-        return mOp;
-    }
+    // Amount of input data that cannot be overwritten during the execution.
+    virtual Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const;
+
+    // Memory required at an output for a given input size.
+    virtual Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const;
 
     /**
-     * @brief Get the operator required implementation specification, according
-     * to the current operator configuration.
+     * @brief Total amount of consumed data from a specific input.
      *
+     * @param inputIdx Index of the input analyzed.
+     * @return DimSize_t
      */
-    ImplSpec getRequiredSpec() const;
+    virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const;
 
     /**
-     * @brief Get the best implementation that matches \p requiredSpecs.
-     * If no implementation matches \p requiredSpecs, \p requiredSpecs is
-     * returned.
+     * @brief Total amount of produced data ready to be used on a specific output.
      *
+     * @param outputIdx Index of the output analyzed.
+     * @return DimSize_t
      */
-    ImplSpec getBestMatch(const ImplSpec& requiredSpecs) const;
+    virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const;
 
     /**
-     * @brief Get an adapted meta operator corresponding to the required
-     * specifications \p requiredSpecs from the implementation specifications
-     * \p spec.
+     * @brief Update the Consumer Producer system by simulating the consumption and production of i/o
      *
-     * @param spec Implementation specification
-     * @param requiredSpecs Required specifications
-     * @return std::shared_ptr<Node> Adapted meta op or nullptr
      */
-    std::shared_ptr<Node> getAdaptation(const ImplSpec& spec, const ImplSpec& requiredSpecs) const;
+    virtual void updateConsummerProducer();
 
     /**
-     * @brief Get the best adapted meta operator corresponding to the required
-     * specifications \p requiredSpecs.
-     * The best adaptation is the one with the lowest overhead cost.
-     * Currently, it is the one requiring the least number of additionnal
-     * operators to match the available implementations.
+     * @brief Reset the Consumer Producer system.
      *
-     * @param requiredSpecs Required specifications
-     * @return std::shared_ptr<Node> Adapted meta op or nullptr
      */
-    std::shared_ptr<Node> getBestAdaptation(const ImplSpec& requiredSpecs) const;
+    virtual void resetConsummerProducer();
 
     virtual ~OperatorImpl() = default;
 
 protected:
-    virtual std::shared_ptr<ProdConso> getProdConso() const;
-    virtual std::vector<ImplSpec> getAvailableImplSpecs() const;
-    bool checkIOSpec(const ImplSpec::IOSpec& required, const ImplSpec::IOSpec& spec) const;
-
     const Operator &mOp;
     const std::string mBackend;
-    std::shared_ptr<ProdConso> mProdConso;
+    std::vector<Elts_t> mNbConsumedData;
+    std::vector<Elts_t> mNbProducedData;
 };
 } // namespace Aidge
 
-template<>
-struct fmt::formatter<Aidge::ImplSpec::IOSpec> {
-    template<typename ParseContext>
-    inline constexpr auto parse(ParseContext& ctx) {
-        return ctx.begin();
-    }
-
-    template<typename FormatContext>
-    inline auto format(Aidge::ImplSpec::IOSpec const& ioSpec, FormatContext& ctx) const {
-        return fmt::format_to(ctx.out(), "{}, {}, {}", ioSpec.type, ioSpec.format, ioSpec.dims);
-    }
-};
-
-template<>
-struct fmt::formatter<Aidge::ImplSpec> {
-    template<typename ParseContext>
-    inline constexpr auto parse(ParseContext& ctx) {
-        return ctx.begin();
-    }
-
-    template<typename FormatContext>
-    inline auto format(Aidge::ImplSpec const& implSpec, FormatContext& ctx) const {
-        return fmt::format_to(ctx.out(), "{}, {}", implSpec.inputs, implSpec.outputs);
-    }
-};
-
 #endif /* AIDGE_BACKEND_OPERATORIMPL_H_ */
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 57c6c385d5fdcc9f2439983bd04cc8ece0d8d8f5..864789c19181b52351fc09a63a787feaed31a216 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -144,7 +144,7 @@ public:
 
     /**
      * Return the raw device pointer.
-     * The raw pointer is garanteed to be valid only on the *same* device.
+     * The raw pointer is guaranteed to be valid only on the *same* device.
      * @param offset Offset, in number of elements.
     */
     virtual void* rawPtr(NbElts_t offset = 0) = 0;
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 9390fe5860b5d3523886856d9b2a40752d338af5..d04624fc530a21730cc4dc1f4f1ac90a58e6590b 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -23,7 +23,7 @@ namespace Aidge {
 
 template <class T>
 class TensorImpl_cpu : public TensorImpl {
-    static_assert(std::is_trivially_copyable<T>::value, "TensorImpl type should be trivially copyable");
+    static_assert(std::is_trivially_copyable<T>::value, "TensorImpl type should be trivially copyable");
 
 private:
     /// Pointer to the data and its capacity
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 6f877194252c7145ea61e1105e0edb0080409d46..7fa6de63f48561ef0585d5bb2f49b1a583710fb2 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -83,7 +83,7 @@ constexpr std::array<DataFormatTranspose, 7> DataFormatTransposeDict = {{
 /**
  * Get the DataFormatTranspose array to transpose data from src to dst DataFormat.
  * @param src Source DataFormat
- * @param dst Destinatin DataFormat
+ * @param dst Destination DataFormat
  * @return DataFormatTranspose Permutation array to achieve a transposition
  *         from src to dst DataFormat.
 */
diff --git a/include/aidge/data/DataProvider.hpp b/include/aidge/data/DataProvider.hpp
index 6c19b5355e406454a2e20bc8994d0ab04d53576a..ad145ca393e4d88210dbda98ab1cb8b37a2480ba 100644
--- a/include/aidge/data/DataProvider.hpp
+++ b/include/aidge/data/DataProvider.hpp
@@ -25,7 +25,7 @@ namespace Aidge {
 /**
  * @brief Data Provider. Takes in a database and compose batches by fetching data from the given database.
  * @todo Implement Drop last batch option. Currently returns the last batch with less elements in the batch.
- * @todo Implement readRandomBatch to compose batches from the database with a random sampling startegy. Necessary for training.
+ * @todo Implement readRandomBatch to compose batches from the database with a random sampling strategy. Necessary for training.
  */
 class DataProvider {
 private:
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 8d9f77bc41b0fa5225e7201d2e4d03eb2ff72502..9e6a54d0d009e2341ed419cea7786e29ac46cc9e 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -75,7 +75,7 @@ class Tensor : public Data,
      * @brief Construct a new Tensor object from an arithmetic parameter.
      *
      * @tparam T Type of the input parameter.
-     * @tparam VT Decayed type of the input paramter.
+     * @tparam VT Decayed type of the input parameter.
      * @param val Input value.
      */
     template<typename T,
@@ -274,7 +274,7 @@ class Tensor : public Data,
     Tensor operator+(const Tensor& other) const;
 
     /**
-     * @brief Element-wise substraction operation for two ``Tensor``s.
+     * @brief Element-wise subtraction operation for two ``Tensor``s.
      * @note ``Tensor``s should be stored on the same backend.
      * @todo If input ``Tensor``s have a different dataType, the output should
      * have the dataType of the ``Tensor`` with the highest precision.
@@ -432,7 +432,7 @@ public:
     }
 
     /**
-     * @brief Return if an implementaiton has been associated.
+     * @brief Return if an implementation has been associated.
      * @return true
      * @return false
      */
@@ -499,8 +499,8 @@ public:
     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
      * If the overall size is not changed (meaning we actually only performed a
-     * reshape), data is garanteed to remain valid.
-     * Otherwise, no garantee is provided regarding the validy of previous data
+     * reshape), data is guaranteed to remain valid.
+     * Otherwise, no guarantee is provided regarding the validity of previous data
      * (unlike std::vector). If the new overall size is larger than the previous
      * one, all previous data is invalided. Otherwise, previous data may or may
      * not remain valid, depending on the backend implementation.
@@ -515,8 +515,8 @@ public:
     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
      * If the overall size is not changed (meaning we actually only performed a
-     * reshape), data is garanteed to remain valid.
-     * Otherwise, no garantee is provided regarding the validy of previous data
+     * reshape), data is guaranteed to remain valid.
+     * Otherwise, no guarantee is provided regarding the validity of previous data
      * (unlike std::vector). If the new overall size is larger than the previous
      * one, all previous data is invalided. Otherwise, previous data may or may
      * not remain valid, depending on the backend implementation.
@@ -684,7 +684,7 @@ public:
      * @note No memory copy is performed, the returned tensor does not own the memory.
      * @note If the number of coordinates matches the number of dimensions, a scalar
      * tensor is returned.
-     * @note If current tensor was contiguous, the returned tensor is garanteed to be
+     * @note If current tensor was contiguous, the returned tensor is guaranteed to be
      * contiguous as well.
      *
      * @param coordIdx Coordinates of the sub-tensor to extract
@@ -695,7 +695,7 @@ public:
     /**
      * @brief Returns a sub-tensor at some coordinate and with some dimension.
      *
-     * @note Data contiguity of the returned Tensor is not guaranted.
+     * @note Data contiguity of the returned Tensor is not guaranteed.
      *
      * @param coordIdx First coordinates of the sub-tensor to extract
      * @param dims Dimensions of the sub-tensor to extract
@@ -762,7 +762,7 @@ public:
     }
 
     /**
-     * Return a reference to a Tensor that is garanteed to be contiguous:
+     * Return a reference to a Tensor that is guaranteed to be contiguous:
      * - itself, if already contiguous;
      * - the provided Tensor, overwritten with the copied data.
      * The data type, backend and device stay the same.
diff --git a/include/aidge/data/half.hpp b/include/aidge/data/half.hpp
index 89df93cf3d10087833b3ad00dfbe3afd4e94c725..1464ac1e092e43059048825bf98d1186314b902c 100644
--- a/include/aidge/data/half.hpp
+++ b/include/aidge/data/half.hpp
@@ -213,11 +213,11 @@
 	#define HALF_ROUND_STYLE	-1			// = std::round_indeterminate
 #endif
 
-/// Tie-breaking behaviour for round to nearest.
+/// Tie-breaking behavior for round to nearest.
 /// This specifies if ties in round to nearest should be resolved by rounding to the nearest even value. By default this is
-/// defined to `0` resulting in the faster but slightly more biased behaviour of rounding away from zero in half-way cases (and
+/// defined to `0` resulting in the faster but slightly more biased behavior of rounding away from zero in half-way cases (and
 /// thus equal to the round() function), but can be redefined to `1` (before including half.hpp) if more IEEE-conformant
-/// behaviour is needed.
+/// behavior is needed.
 #ifndef HALF_ROUND_TIES_TO_EVEN
 	#define HALF_ROUND_TIES_TO_EVEN	0		// ties away from zero
 #endif
@@ -950,7 +950,7 @@ namespace half_float
 		/// Convert half-precision floating point to integer.
 		/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
 		/// \tparam E `true` for round to even, `false` for round away from zero
-		/// \tparam T type to convert to (buitlin integer type with at least 16 bits precision, excluding any implicit sign bits)
+		/// \tparam T type to convert to (builtin integer type with at least 16 bits precision, excluding any implicit sign bits)
 		/// \param value binary representation of half-precision value
 		/// \return integral value
 		template<std::float_round_style R,bool E,typename T> T half2int_impl(uint16 value)
@@ -988,13 +988,13 @@ namespace half_float
 
 		/// Convert half-precision floating point to integer.
 		/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
-		/// \tparam T type to convert to (buitlin integer type with at least 16 bits precision, excluding any implicit sign bits)
+		/// \tparam T type to convert to (builtin integer type with at least 16 bits precision, excluding any implicit sign bits)
 		/// \param value binary representation of half-precision value
 		/// \return integral value
 		template<std::float_round_style R,typename T> T half2int(uint16 value) { return half2int_impl<R,HALF_ROUND_TIES_TO_EVEN,T>(value); }
 
 		/// Convert half-precision floating point to integer using round-to-nearest-away-from-zero.
-		/// \tparam T type to convert to (buitlin integer type with at least 16 bits precision, excluding any implicit sign bits)
+		/// \tparam T type to convert to (builtin integer type with at least 16 bits precision, excluding any implicit sign bits)
 		/// \param value binary representation of half-precision value
 		/// \return integral value
 		template<typename T> T half2int_up(uint16 value) { return half2int_impl<std::round_to_nearest,0,T>(value); }
@@ -1053,16 +1053,16 @@ namespace half_float
 
 	/// Half-precision floating point type.
 	/// This class implements an IEEE-conformant half-precision floating point type with the usual arithmetic operators and
-	/// conversions. It is implicitly convertible to single-precision floating point, which makes artihmetic expressions and
+	/// conversions. It is implicitly convertible to single-precision floating point, which makes arithmetic expressions and
 	/// functions with mixed-type operands to be of the most precise operand type. Additionally all arithmetic operations
 	/// (and many mathematical functions) are carried out in single-precision internally. All conversions from single- to
 	/// half-precision are done using the library's default rounding mode, but temporary results inside chained arithmetic
 	/// expressions are kept in single-precision as long as possible (while of course still maintaining a strong half-precision type).
 	///
 	/// According to the C++98/03 definition, the half type is not a POD type. But according to C++11's less strict and
-	/// extended definitions it is both a standard layout type and a trivially copyable type (even if not a POD type), which
+	/// extended definitions it is both a standard layout type and a trivially copyable type (even if not a POD type), which
 	/// means it can be standard-conformantly copied using raw binary copies. But in this context some more words about the
-	/// actual size of the type. Although the half is representing an IEEE 16-bit type, it does not neccessarily have to be of
+	/// actual size of the type. Although the half is representing an IEEE 16-bit type, it does not necessarily have to be of
 	/// exactly 16-bits size. But on any reasonable implementation the actual binary representation of this type will most
 	/// probably not ivolve any additional "magic" or padding beyond the simple binary representation of the underlying 16-bit
 	/// IEEE number, even if not strictly guaranteed by the standard. But even then it only has an actual size of 16 bits if
@@ -2155,25 +2155,25 @@ namespace half_float
 		/// \name Arithmetic operators
 		/// \{
 
-		/// Add halfs.
+		/// Add halves.
 		/// \param x left operand
 		/// \param y right operand
 		/// \return sum of half expressions
 		template<typename T,typename U> typename enable<expr,T,U>::type operator+(T x, U y) { return functions::plus(x, y); }
 
-		/// Subtract halfs.
+		/// Subtract halves.
 		/// \param x left operand
 		/// \param y right operand
 		/// \return difference of half expressions
 		template<typename T,typename U> typename enable<expr,T,U>::type operator-(T x, U y) { return functions::minus(x, y); }
 
-		/// Multiply halfs.
+		/// Multiply halves.
 		/// \param x left operand
 		/// \param y right operand
 		/// \return product of half expressions
 		template<typename T,typename U> typename enable<expr,T,U>::type operator*(T x, U y) { return functions::multiplies(x, y); }
 
-		/// Divide halfs.
+		/// Divide halves.
 		/// \param x left operand
 		/// \param y right operand
 		/// \return quotient of half expressions
@@ -2181,7 +2181,7 @@ namespace half_float
 
 		/// Identity.
 		/// \param arg operand
-		/// \return uncahnged operand
+		/// \return unchanged operand
 		template<typename T> HALF_CONSTEXPR typename enable<T,T>::type operator+(T arg) { return arg; }
 
 		/// Negation.
@@ -2330,28 +2330,28 @@ namespace half_float
 		inline expr exp2(half arg) { return functions::exp2(arg); }
 		inline expr exp2(expr arg) { return functions::exp2(arg); }
 
-		/// Natural logorithm.
+		/// Natural logarithm.
 		/// \param arg function argument
 		/// \return logarithm of \a arg to base e
 //		template<typename T> typename enable<expr,T>::type log(T arg) { return functions::log(arg); }
 		inline expr log(half arg) { return functions::log(arg); }
 		inline expr log(expr arg) { return functions::log(arg); }
 
-		/// Common logorithm.
+		/// Common logarithm.
 		/// \param arg function argument
 		/// \return logarithm of \a arg to base 10
 //		template<typename T> typename enable<expr,T>::type log10(T arg) { return functions::log10(arg); }
 		inline expr log10(half arg) { return functions::log10(arg); }
 		inline expr log10(expr arg) { return functions::log10(arg); }
 
-		/// Natural logorithm.
+		/// Natural logarithm.
 		/// \param arg function argument
 		/// \return logarithm of \a arg plus 1 to base e
 //		template<typename T> typename enable<expr,T>::type log1p(T arg) { return functions::log1p(arg); }
 		inline expr log1p(half arg) { return functions::log1p(arg); }
 		inline expr log1p(expr arg) { return functions::log1p(arg); }
 
-		/// Binary logorithm.
+		/// Binary logarithm.
 		/// \param arg function argument
 		/// \return logarithm of \a arg to base 2
 //		template<typename T> typename enable<expr,T>::type log2(T arg) { return functions::log2(arg); }
@@ -2620,7 +2620,7 @@ namespace half_float
 		/// Multiply by power of two.
 		/// \param arg number to modify
 		/// \param exp power of two to multiply with
-		/// \return \a arg multplied by 2 raised to \a exp
+		/// \return \a arg multiplied by 2 raised to \a exp
 //		template<typename T> typename enable<half,T>::type ldexp(T arg, int exp) { return functions::scalbln(arg, exp); }
 		inline half ldexp(half arg, int exp) { return functions::scalbln(arg, exp); }
 		inline half ldexp(expr arg, int exp) { return functions::scalbln(arg, exp); }
@@ -2636,7 +2636,7 @@ namespace half_float
 		/// Multiply by power of two.
 		/// \param arg number to modify
 		/// \param exp power of two to multiply with
-		/// \return \a arg multplied by 2 raised to \a exp
+		/// \return \a arg multiplied by 2 raised to \a exp
 //		template<typename T> typename enable<half,T>::type scalbn(T arg, int exp) { return functions::scalbln(arg, exp); }
 		inline half scalbn(half arg, int exp) { return functions::scalbln(arg, exp); }
 		inline half scalbn(expr arg, int exp) { return functions::scalbln(arg, exp); }
@@ -2644,7 +2644,7 @@ namespace half_float
 		/// Multiply by power of two.
 		/// \param arg number to modify
 		/// \param exp power of two to multiply with
-		/// \return \a arg multplied by 2 raised to \a exp
+		/// \return \a arg multiplied by 2 raised to \a exp
 //		template<typename T> typename enable<half,T>::type scalbln(T arg, long exp) { return functions::scalbln(arg, exp); }
 		inline half scalbln(half arg, long exp) { return functions::scalbln(arg, exp); }
 		inline half scalbln(expr arg, long exp) { return functions::scalbln(arg, exp); }
@@ -2798,7 +2798,7 @@ namespace half_float
 		inline bool islessequal(expr x, half y) { return functions::islessequal(x, y); }
 		inline bool islessequal(expr x, expr y) { return functions::islessequal(x, y); }
 
-		/// Comarison for less or greater.
+		/// Comparison for less or greater.
 		/// \param x first operand
 		/// \param y second operand
 		/// \retval true if either less or greater
@@ -3027,7 +3027,7 @@ namespace std
 		/// Quiet NaN.
 		static HALF_CONSTEXPR half_float::half quiet_NaN() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7FFF); }
 
-		/// Signalling NaN.
+		/// Signaling NaN.
 		static HALF_CONSTEXPR half_float::half signaling_NaN() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7DFF); }
 
 		/// Smallest positive subnormal value.
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index c025ad770809864ac4e2d2c38e616e3d95e3d96a..76f5dcdfc28e90a3f83435841af21048bcb2a9c0 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -91,7 +91,7 @@ public:
 
     /**
      * @brief Set the node name.
-     * @warning Undefined behaviour when several Nodes have the same name.
+     * @warning Undefined behavior when several Nodes have the same name.
      * @param name New name for the node.
      */
     inline void setName(const std::string &name) { mName = name; }
@@ -184,7 +184,7 @@ public:
     /**
      * @brief List outside data input connections of the GraphView.
      * Data inputs exclude inputs expecting parameters (weights or bias).
-     * The vector size is garanteed to match the number of outside data inputs of the GraphView. If there is
+     * The vector size is guaranteed to match the number of outside data inputs of the GraphView. If there is
      * no external connection to a given input, a pair of nullptr and gk_IODefaultIndex is returned.
      * @return std::vector<std::pair<NodePtr, IOIndex_t>>
      */
@@ -214,7 +214,7 @@ public:
 
     /**
      * @brief List outside output connections of the GraphView. The vector
-     * size is garanteed to match the number of outputs of the GraphView. If there is
+     * size is guaranteed to match the number of outputs of the GraphView. If there is
      * no connection to a given output, the corresponding sub-vector will be empty.
      * @return std::vector<std::pair<NodePtr, IOIndex_t>>
      */
@@ -319,12 +319,12 @@ public:
      * - The childs and parents of the next node in the ranked list are then
      *   added to the list, and so on.
      * - Any remaining nodes have no path to the root node and are added in
-     *   arbitrary order. In this case, the ranking is not garanteed to be unique.
+     *   arbitrary order. In this case, the ranking is not guaranteed to be unique.
      *
-     * If the ranking cannot be garanteed to be unique, the second item indicates
-     * the rank from which unicity cannot be garanteed.
+     * If the ranking cannot be guaranteed to be unique, the second item indicates
+     * the rank from which unicity cannot be guaranteed.
      * @return std::pair<std::vector<NodePtr>, size_t> Pair with the list of ranked
-     * nodes and the size of the ranked sub-list where unicity is garanteed.
+     * nodes and the size of the ranked sub-list where unicity is guaranteed.
     */
     std::pair<std::vector<NodePtr>, size_t> getRankedNodes() const;
 
@@ -394,7 +394,7 @@ public:
      * @param fromOutNode Pointer to the already included Node the new Node will
      * be linked to (it will become a parent of the new Node). If the GraphView
      * only has one output Node, then default to this Node.
-     * @param fromTensor Ouput Tensor ID of the already included Node. Default to
+     * @param fromTensor Output Tensor ID of the already included Node. Default to
      * 0.
      * @param toTensor Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning
      * first available data input for the Node.
@@ -412,7 +412,7 @@ public:
      * be linked to (it will become a parent of the new Node). As a name is
      * optional, ensure such Node is in the GraphView or it will send back an
      * error message.
-     * @param fromTensor Ouput Tensor ID of the already included Node. Default to
+     * @param fromTensor Output Tensor ID of the already included Node. Default to
      * 0.
      * @param toTensor Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning
      * first available data input for the Node.
@@ -489,7 +489,7 @@ public:
      * Both sets should include all the necessary Producers.
      * @details There are 3 cases of replacement:
      * Case 1: same number of input/output connections for oldNodes and newNodes sets.
-     *     - input/output connections are replacated according to their IDs.
+     *     - input/output connections are replicated according to their IDs.
      * Case 2: different number of input/output connections for oldNodes and newNodes sets.
      *     - only a single parent/child node for the newNodes set, every input/output is
      *       connected to it.
diff --git a/include/aidge/graph/Matching.hpp b/include/aidge/graph/Matching.hpp
index b846af10b87b4088dab7fee41187ded91bf531d1..3b0874580b112f4c219886a78677e6c9801b72b8 100644
--- a/include/aidge/graph/Matching.hpp
+++ b/include/aidge/graph/Matching.hpp
@@ -53,7 +53,7 @@ public:
     struct MatchingResult {
         // Mutable is required to allow modifying MatchingResult members with a std::set
         // iterator. Any change should not modify the set ordering.
-        // We use graph->rootNode() as the std::set key, which is garanteed
+        // We use graph->rootNode() as the std::set key, which is guaranteed
         // to never change after insertion!
         mutable std::shared_ptr<GraphView> graph;
         mutable std::map<std::string, std::map<std::string, NodePtr>> anchors;
@@ -134,7 +134,7 @@ public:
      * QUERY = SEQ | NODE_OR_BLOCK (';' (SEQ | NODE_OR_BLOCK))*
      *
      * @param query The query to search.
-     * @param disjoint If true, only keep the longuest disjoint (non-overlapping) matches.
+     * @param disjoint If true, only keep the longest disjoint (non-overlapping) matches.
      * @return std::set<MatchingResult> Set of matches, each stored in a MatchingResult struct.
     */
     std::set<MatchingResult> match(const std::string& query, bool disjoint = false);
@@ -150,7 +150,7 @@ public:
     MatchingResult matchFrom(NodePtr startNode, const std::string& query);
 
     /**
-     * Filter to keep only the longuest disjoint (non-overlapping) matches.
+     * Filter to keep only the longest disjoint (non-overlapping) matches.
     */
     std::set<MatchingResult> filterLonguestDisjoint(const std::set<MatchingResult>& matches);
 
@@ -216,7 +216,7 @@ private:
         bool operator()(const MatchingResult& lhs, const MatchingResult& rhs) const {
             // Some matches size could be the same
             if (lhs.graph->getNodes().size() == rhs.graph->getNodes().size()) {
-                // In this case, use rootNode which is garanteed to be different!
+                // In this case, use rootNode which is guaranteed to be different!
                 return lhs.graph->rootNode() < rhs.graph->rootNode();
             }
 
@@ -226,7 +226,7 @@ private:
 };
 
 inline bool operator<(const Aidge::SinglePassGraphMatching::MatchingResult& lhs, const Aidge::SinglePassGraphMatching::MatchingResult& rhs) {
-    // Matches rootNode are garanteed to be different!
+    // Matches rootNode are guaranteed to be different!
     return lhs.graph->rootNode() < rhs.graph->rootNode();
 }
 }  // namespace Aidge
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 51cc9c444edf03febf4416149e9160df0bbfca9c..a16bbd63ecf52e8c97d5032c5c90a5f69186f995 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -135,7 +135,7 @@ public:
 
   /**
    * @brief Set the Node name.
-   * @warning Undefined behaviour when several Nodes have the same name.
+   * @warning Undefined behavior when several Nodes have the same name.
    * @param name New name for the node.
    */
   void setName(const std::string &name);
@@ -144,7 +144,7 @@ public:
    * @brief Given the parameter name generate a new name which is unique
    * in all the GraphView which contains this node.
    * To generate the new name the method is called recursively and append
-   * the caracter ``_``.
+   * the character ``_``.
    * If no duplicate return name, this is the exit condition.
    * @param name Base name to make unique.
    * @return A unique name in all the GraphView which contains this one.
@@ -191,7 +191,7 @@ public:
   bool valid() const;
 
   /**
-   * @brief List of pair <Parent, ID of the data intput>. When an input is not
+   * @brief List of pair <Parent, ID of the data input>. When an input is not
    * linked to any Parent, the pair is <nullptr, gk_IODefaultIndex>.
    * Data inputs exclude inputs expecting parameters (weights or bias).
    * @return std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>>
@@ -240,7 +240,7 @@ public:
 
   /**
    * @brief List input ids of children linked to outputs of the node. The vector
-   * size is garanteed to match the number of outputs of the node. If there is
+   * size is guaranteed to match the number of outputs of the node. If there is
    * no connection to a given output, the corresponding sub-vector will be empty.
    * @return std::vector<std::vector<std::pair<std::shared_ptr<Node>,
    * IOIndex_t>>>
@@ -333,7 +333,7 @@ public:
    * @param outId ID of the current Node output to connect to the other Node.
    * Default to 0.
    * @param otherInId ID of the other Node input to connect to the current Node.
-   * Default to the first avaible data input.
+   * Default to the first available data input.
    */
   void addChild(NodePtr otherNode,
                 const IOIndex_t outId = IOIndex_t(0),
@@ -410,7 +410,7 @@ public:
   bool removeChild(const NodePtr nodePtr, const IOIndex_t outId = 0);
 
   /**
-   * @brief Remove every link of surrounding nodes to it and conversly
+   * @brief Remove every link of surrounding nodes to it and conversely
    */
   void resetConnections(bool includeLearnableParam = false);
 
@@ -546,7 +546,7 @@ private:
    */
   void addParent(const NodePtr otherNode, const IOIndex_t inId);
 
-  // OPERATOR FUNCTIONNAL but commented out to avoid iostream inclusion
+  // OPERATOR FUNCTIONAL but commented out to avoid iostream inclusion
   // /**
   //  * @brief operator<< overload to ease print & debug of nodes
   //  * @param[inout] ostream to print to
diff --git a/include/aidge/graphRegex/GraphParser.hpp b/include/aidge/graphRegex/GraphParser.hpp
index 2c25ac0b76368242891e6e5ba92c2c5fc913a23c..b165891ff1c8a55e565e3520813b707303ddfd1f 100644
--- a/include/aidge/graphRegex/GraphParser.hpp
+++ b/include/aidge/graphRegex/GraphParser.hpp
@@ -17,7 +17,7 @@ class GraphParser {
 public:
     /**
      * @brief AST graph creation function
-     * @param gRegexExpressions String representing the logical fuction to be performed
+     * @param gRegexExpressions String representing the logical function to be performed
      */
     GraphParser(const std::string gRegexExpressions);
 
diff --git a/include/aidge/graphRegex/GraphRegex.hpp b/include/aidge/graphRegex/GraphRegex.hpp
index 573447cf934b196e8b0c32d7a58e1977f5aa5f9a..f0f8e68e41a09cb54fb7528cb7f6ce065674af02 100644
--- a/include/aidge/graphRegex/GraphRegex.hpp
+++ b/include/aidge/graphRegex/GraphRegex.hpp
@@ -68,14 +68,14 @@ class GraphRegex{
 
     /**
      *  @brief brief match the queries in the graph 
-     *  @param ref the graph were the querys in search 
+     *  @param ref the graph where the queries are searched
      *  @return the result  
     */
     std::set<std::shared_ptr<MatchSolution>> match(std::shared_ptr<GraphView> ref);
 
     /***
-     *  @brief  match the queries in the graph and applied the recipes fuction  
-     *  @param ref the graph were the querys in search 
+     *  @brief  match the queries in the graph and apply the recipes function
+     *  @param ref the graph where the queries are searched
     */
     void appliedRecipes(std::shared_ptr<GraphView> ref);
 
diff --git a/include/aidge/graphRegex/matchFsm/FsmEdge.hpp b/include/aidge/graphRegex/matchFsm/FsmEdge.hpp
index a6cc3e59247d4be98caa9881182bfba1c44e0178..6397da49478c44ef6050c5bad77f12ba10efaca7 100644
--- a/include/aidge/graphRegex/matchFsm/FsmEdge.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmEdge.hpp
@@ -60,7 +60,7 @@ namespace Aidge{
         virtual const EdgeTestResult test(const std::shared_ptr<FsmRunTimeContext> stmContext) =0;
 
         /**
-        *  @brief test is the egde test a common node
+        *  @brief test whether the edge tests a common node
         *  @return true if is a common
         */
         virtual bool isCommon(void);
@@ -70,7 +70,7 @@ namespace Aidge{
         */
         virtual size_t getCommonIdx(void);
         /**
-         * @brief get the relative postion to the common node deffine in this edge
+         * @brief get the relative position to the common node defined in this edge
          * @return map
         */
         const std::map<size_t,int>& getRelative(void);
@@ -116,7 +116,7 @@ namespace Aidge{
     };
 
     /**
-     * @brief class specialization for not commun node (node that must be match one Unique) transition
+     * @brief class specialization for a non-common node (node that must be matched uniquely) transition
     */
     class FsmEdgeUnique:public FsmEdge
     {
@@ -127,7 +127,7 @@ namespace Aidge{
     };
 
     /**
-     * @brief class specialization for  commun node transition
+     * @brief class specialization for  common node transition
      * @see FsmEdge
     */
     class FsmEdgeCommon:public FsmEdge
@@ -135,7 +135,7 @@ namespace Aidge{
 
         private:
         /**
-         * @brief the map that defind the ralation between the commonKey find by the lexer and a unique id use to refer to the common node
+         * @brief the map that defines the relation between the commonKey found by the lexer and a unique id used to refer to the common node
         */
         static std::map<std::string,int> mCommonIdxMap;
         /**
@@ -145,7 +145,7 @@ namespace Aidge{
         public:
 
         /**
-         * @brief constructor  commun node ,
+         * @brief constructor  common node ,
          * @details during construction,
          * the node key found by the lexer is converted to a unique id and the relative positions are updated.
         */
@@ -159,7 +159,7 @@ namespace Aidge{
 
 
     /**
-     * @brief class spesialisation for ref transition
+     * @brief class specialization for ref transition
      * @see FsmEdge
     */
     class FsmEdgeRef:public FsmEdge
diff --git a/include/aidge/graphRegex/matchFsm/FsmGraph.hpp b/include/aidge/graphRegex/matchFsm/FsmGraph.hpp
index d718009e87e5360981ff93ff808124581917c089..e7402b3f0973e4b9e7053b4d59c9ff63ca6dd496 100644
--- a/include/aidge/graphRegex/matchFsm/FsmGraph.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmGraph.hpp
@@ -49,7 +49,7 @@ public:
 
     /**
      * @brief get the set of the valid states
-     * @return set of valide state
+     * @return set of valid state
     */
     const std::set<std::shared_ptr<FsmNode>> getValidNodes(void);
 
@@ -60,7 +60,7 @@ public:
     const std::set<std::shared_ptr<FsmNode>> getNodes(void);
 
     /**
-     * @brief set a groupe idx for all the nodes in the graph
+     * @brief set a group idx for all the nodes in the graph
     */
     void setGroupe(std::size_t groupeIdx);
 
diff --git a/include/aidge/graphRegex/matchFsm/FsmNode.hpp b/include/aidge/graphRegex/matchFsm/FsmNode.hpp
index 7987c5ce33522ca7d43de1918d53e68738af6d18..f4636e0e025d26fa2afae88b6ffca28a511e9509 100644
--- a/include/aidge/graphRegex/matchFsm/FsmNode.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmNode.hpp
@@ -31,10 +31,10 @@ namespace Aidge{
     /**
      * @brief is a node in the FSM graph, it's a state in the FSM
      * @details a state can be and/or :
-     * - a valide state, the match is valide if it stop on this edge
+     * - a valid state, the match is valid if it stops on this edge
      * - a start state , the match start on this state
      * The state is also define by this Origin (is the unique id of it's expretion )
-     * and it's groupe (for inner expression TODO)
+     * and its group (for inner expression TODO)
     */
     class FsmNode : public std::enable_shared_from_this<FsmNode>
     {
@@ -84,7 +84,7 @@ namespace Aidge{
 
         bool isValid(void);
         bool isStart(void);
-        void unValid(void);
+        void invalid(void);
         void valid(void);
         void unStart(void);
         void start(void);
diff --git a/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp b/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp
index 36d09db47d23395d649a688252f2af803cb1bc9d..0b44172be0c3b671043fda884efadb84ba46e215 100644
--- a/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp
+++ b/include/aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp
@@ -40,7 +40,7 @@ private:
     */
     std::map<NodePtr,std::size_t> mCommonNodes;
     /**
-     * @brief the map of the node that as been valid in this context , and the test that valide the node
+     * @brief the map of the nodes that have been validated in this context, and the test that validated each node
     */
     std::map<std::shared_ptr<ConditionalInterpreter>,std::set<NodePtr>> mValidNodes;
     /**
@@ -52,7 +52,7 @@ public:
      * @brief constructor
      * @param actState the actual state in the FSM
      * @param actOpNode the actual node in the graph
-     * @param idxRejeced the idx in the global regected node vector init max() as sentinel value of undefind
+     * @param idxRejeced the idx in the global rejected node vector, init to max() as sentinel value of undefined
     */
     FsmRunTimeContext(std::shared_ptr<FsmNode> actState ,NodePtr actOpNode ,std::size_t idxRejeced =std::numeric_limits<std::size_t>::max() );
     FsmRunTimeContext(std::shared_ptr<FsmRunTimeContext> fsmRunTime);
@@ -85,7 +85,7 @@ public:
 
     /**
      * @ingroup FsmRunTimeContextTest
-     * @brief test if the actual state is valide
+     * @brief test if the actual state is valid
      * @return bool
      */
     bool isOnValidState(void);
diff --git a/include/aidge/nodeTester/ConditionalData.hpp b/include/aidge/nodeTester/ConditionalData.hpp
index 12df32a728571678a3885f9981e526e1d73db785..c6c521bd9c3e1a0333bb2a6c38545bb2bf6f3fe6 100644
--- a/include/aidge/nodeTester/ConditionalData.hpp
+++ b/include/aidge/nodeTester/ConditionalData.hpp
@@ -12,7 +12,7 @@ namespace Aidge{
 
 
 /////////////////////////
-// The data type in AST Intepretation
+// The data type in AST Interpretation
 ////////////////////////
 
 class BaseConditionalValue {
diff --git a/include/aidge/nodeTester/ConditionalInterpreter.hpp b/include/aidge/nodeTester/ConditionalInterpreter.hpp
index af6a3b920bb9ca389724860d55250d7ef4540677..713a166ec2cea7781ce98c850ecbf587eca58678 100644
--- a/include/aidge/nodeTester/ConditionalInterpreter.hpp
+++ b/include/aidge/nodeTester/ConditionalInterpreter.hpp
@@ -37,7 +37,7 @@ class ConditionalRegisterFunction {
      */
     template <typename T>
     T safeCastInput( std::shared_ptr<ConditionalData> data) {
-        //cnvertion and type cheking
+        //conversion and type checking
         if (data->isTypeEqualTo<T>()){
             return data->getValue<T>();
         }else{
@@ -123,7 +123,7 @@ class ConditionalRegisterFunction {
      */
     template <class F, std::size_t... ParamsIdx>
     auto funcPointer(F f, std::index_sequence<ParamsIdx...>) {
-        //wrapp the lambda in a new one that as ConditionalData as inputs and output
+        //wrap the lambda in a new one that has ConditionalData as inputs and output
     	return [this,f](std::vector< std::shared_ptr<ConditionalData>>  &args) {
             if (args.size() < sizeof...(ParamsIdx)){
                 std::ostringstream errorMessage;
@@ -199,10 +199,10 @@ class ConditionalRegisterFunction {
      /**
      * @brief Runs the function associated with the given key, using the provided vector of input data.
      * @param key The key of the function to run.
-     * @param datas The vector of input data.
+     * @param data The vector of input data.
      * @return A pointer to the output ConditionalData object.
      */
-     std::shared_ptr<ConditionalData> run(const std::string key,std::vector< std::shared_ptr<ConditionalData>> & datas);
+     std::shared_ptr<ConditionalData> run(const std::string key,std::vector< std::shared_ptr<ConditionalData>> & data);
 
     bool isLambdaRegister(const std::string &key) {
         if(mWlambda.find(key) != mWlambda.end()){
@@ -237,7 +237,7 @@ class ConditionalInterpreter
      */
     std::shared_ptr<AstNode<ConditionalTokenTypes>> mTree;
     /**
-     * @brief the registery for the lambda fuction
+     * @brief the registry for the lambda function
      * @see ConditionalRegisterFunction
     */
     ConditionalRegisterFunction mLambdaRegister;
@@ -275,8 +275,8 @@ class ConditionalInterpreter
 
     /**
      * @brief Test a node depending of the ConditionalExpressions
-     * @details the AST is visit using \ref visit() whith the $ init whit the nodeOp
-     * @return bool the match node has the initialized expresion
+     * @details the AST is visited using \ref visit() with the $ initialized with the nodeOp
+     * @return bool the match node has the initialized expression
      * @see visit() This function uses the visit() function to perform the evaluation.
      */
     bool test( const NodePtr nodeOp);
@@ -295,7 +295,7 @@ class ConditionalInterpreter
     private:
     /**
      * @brief Recursive AST traversal function, using the for interpreting AST nodes function,
-     * using \ref ASTnodeInterpreterF fuctions
+     * using \ref ASTnodeInterpreterF functions
      * @param NodeOp The node currently being tested
      * @param nodes The AST given by the parsing process
      */
diff --git a/include/aidge/nodeTester/ConditionalLexer.hpp b/include/aidge/nodeTester/ConditionalLexer.hpp
index fcfb9ebe783ac719076ce675e6fc3d78caf5be07..0cf15d968bb6dae7532a1bcbb6c77b98ba0e42c6 100644
--- a/include/aidge/nodeTester/ConditionalLexer.hpp
+++ b/include/aidge/nodeTester/ConditionalLexer.hpp
@@ -65,7 +65,7 @@ private:
 
 /**
  * @brief Constructs an error message to display the character not understood by the lexer
- * @return error mesage
+ * @return error message
  */
 std::runtime_error badTokenError(const std::string& currentChars,std::size_t position);
 
diff --git a/include/aidge/nodeTester/ConditionalParser.hpp b/include/aidge/nodeTester/ConditionalParser.hpp
index 1f3671ea5b68008a67be5d6a63d09051d49939d5..06b0e112cfe9bba6a4f0bf32eb1b793326a357f8 100644
--- a/include/aidge/nodeTester/ConditionalParser.hpp
+++ b/include/aidge/nodeTester/ConditionalParser.hpp
@@ -34,7 +34,7 @@ class ConditionalParser {
     public:
     /**
      * @brief AST graph creation function
-     * @param ConditionalExpressions String representing the logical fuction to be performed
+     * @param ConditionalExpressions String representing the logical function to be performed
      */
     ConditionalParser(const std::string ConditionalExpressions);
 
@@ -87,7 +87,7 @@ class ConditionalParser {
     std::shared_ptr<AstNode<ConditionalTokenTypes>> constructAstLambda(void);
     /**
     * @ingroup ParsingFunctions
-    * @brief Function of grammar rules for a expresion : cmpr ((AND | OR) cmpr)*
+    * @brief Function of grammar rules for a expression : cmpr ((AND | OR) cmpr)*
     * @return AST node
     */
     std::shared_ptr<AstNode<ConditionalTokenTypes>> constructAstExpr(std::size_t precLimit = 0);
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index 24476f231806bf38ae48b9e2d5ec405e072afdb2..17eca02261704e98341adca81636b594d92c2318 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -26,11 +26,6 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
-class Identity_OpImpl : public OperatorImpl {
-public:
-    Identity_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
-    void forward() override;
-};
 
 /**
  * @brief Indentity_Op is an helper operator made to ease the declaration of MetaNodes.
@@ -40,7 +35,7 @@ public:
  *
  */
 class Identity_Op : public OperatorTensor,
-    public Registrable<Identity_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Identity_Op&)>> {
+    public Registrable<Identity_Op, std::string, std::unique_ptr<OperatorImpl>(const Identity_Op&)> {
 public:
     static const std::string Type;
 
@@ -59,8 +54,29 @@ public:
      */
     std::shared_ptr<Operator> clone() const override;
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
-    std::set<std::string> getAvailableBackends() const override;
+    // bool forwardDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
+
+    /**
+     * @brief Check if output dimensions have been computed.
+     * @note Since Identity has no output Tensor, this function checks if its
+     * only input's dimensions have been computed.
+     *
+     * @return true Input has dimensions.
+     * @return false Input has no dimensions or is a nullptr.
+     */
+    bool dimsForwarded() const override final;
+
+
+    void forward() override final;
+
+    void backward() override final { }
+
+    void setBackend(const std::string& /*name*/, DeviceIdx_t /*device*/ = 0) override final {
+        // setBackend does nothing: the Identity node has no backend, it just passes the same Tensor
+    }
+    void setDataType(const DataType& /*dataType*/) const override final {
+        // setDataType does nothing: the Identity node has no backend, it just passes the same Tensor
+    }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index a799153e1db5eb83964ed06dd3bc0fb06da64de8..44c2b006dff40bb07d0a9a18112d4afc56a747f8 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -149,7 +149,7 @@ public:
 
     /**
      * @brief Minimum amount of data from a specific input for one computation pass.
-     * @param inputIdx Index of the input analysed.
+     * @param inputIdx Index of the input analyzed.
      * @return Elts_t
      */
     virtual Elts_t getNbRequiredData(const IOIndex_t inputIdx) const;
@@ -163,7 +163,7 @@ public:
     /**
      * @brief Total amount of consumed data from a specific input.
      *
-     * @param inputIdx Index of the input analysed.
+     * @param inputIdx Index of the input analyzed.
      * @return Elts_t
      */
     virtual Elts_t getNbConsumedData(const IOIndex_t inputIdx) const;
@@ -171,7 +171,7 @@ public:
     /**
      * @brief Total amount of produced data ready to be used on a specific output.
      *
-     * @param outputIdx Index of the output analysed.
+     * @param outputIdx Index of the output analyzed.
      * @return Elts_t
      */
     virtual Elts_t getNbProducedData(const IOIndex_t outputIdx) const;
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index c8cdd93810e18bd3cdd0a2d080e54aae2d787c66..19e2f13e4ff39fee181c6ad0cf2fbab510f22c3e 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -92,7 +92,7 @@ public:
 	 * @brief Will compute the dimensions of operator's output tensor given the input sizes
  	 *        If the output dimensions cannot be computed because it depends on some undefined inputs then forwardDims will return false and enter in TOKEN mode for subsequent tensors.
  	 *        - TOKEN mode means that forwarddims will only ensure that all inputs and outputs of the graph the node is within are connected.
- 	 * @param[in] allowDataDependency if set to true, this means that this operator output dimensions depends on the dimensions of optionnal parameter tensors.
+ 	 * @param[in] allowDataDependency if set to true, this means that this operator output dimensions depends on the dimensions of optional parameter tensors.
  	 * @return true if dims have been properly forwarded. false otherwise. If set to false, then forwardDims will enter in token mode.
  	 *      
      */
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 811402420df170c011e478148cf646e6c585cc84..055e6fd1d8917ae015b88a223f1f8701fd9dce59 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -87,7 +87,7 @@ public:
 };
 
 /**
- * @brief Exract a sub-Tensor from a bigger original Tensor.
+ * @brief Extract a sub-Tensor from a bigger original Tensor.
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 8c3a111c42dfeb2b4e27269839e41f3b362bdda3..5a5652388c3622bf8a46792b3c58e00c79de22f3 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -84,7 +84,7 @@ public:
 };
 
 /**
- * @brief Exract a sub-Tensor from a bigger original Tensor.
+ * @brief Extract a sub-Tensor from a bigger original Tensor.
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> A Node containing the Operator.
  */
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index 64a775eb4209ecad0e29decd8336ebb77bbe652f..5c966edaf27271da79f9950cdf007cfcf446dd8d 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -53,7 +53,7 @@ enum class SqueezeAttr {
  * @brief This operator has as purpose to remove dummy dimensions around given
  * axes.
  * input#0 : Tensor to squeeze
- * input#1 Optionnal : 1D tensor that lists the axes to squeeze
+ * input#1 Optional : 1D tensor that lists the axes to squeeze
  * @note the axes to squeeze can either be given via attribute or via input #1,
  * for the sake of simplicity of the example unders, the axes to squeeze are
  * given via attribute
diff --git a/include/aidge/scheduler/MemoryManager.hpp b/include/aidge/scheduler/MemoryManager.hpp
index 2e397d1dbaa1cc8d8f586d15363cbd2245963152..880498515f36da4ecdf7f92aa7375981d5c67d10 100644
--- a/include/aidge/scheduler/MemoryManager.hpp
+++ b/include/aidge/scheduler/MemoryManager.hpp
@@ -33,7 +33,7 @@ namespace Aidge {
  * - A MemoryPlane is tailored for handling (N)HWC data with two properties:
  *   - Possibility of wrapping: on the H axis (each W*C block is contiguous).
  *   - Possibility of concatenation: on the C axis (C1+C2+...+Cn).
- * - All the sizes and offets specified in a MemoryManager are expressed in
+ * - All the sizes and offsets specified in a MemoryManager are expressed in
  *   number of data elements, or **words**, meaning currently a uniform data 
  *   precision is expected in a MemoryManager (for instance, if the precision is
  *   16-bits, each data element will be 2 bytes, which will be the size of a word).
@@ -95,9 +95,9 @@ public:
      *   with different size, like NHWC = NHW(C1+C2):
      *   - MemoryPlane#1: \p size = C1 and \p stride = C=C1+C2
      *   - MemoryPlane#2: \p size = C2 and \p stride = C=C1+C2
-     *                    (with an additionnal relative offset of +C1)
+     *                    (with an additional relative offset of +C1)
      * In this mode, wrapping can only occur on the H (\p count) axis. W*C chunks
-     * are garanteed to be contiguous (\p length * \p stride).
+     * are guaranteed to be contiguous (\p length * \p stride).
      * 
      * By default, \p stride = \p size, \p count = 1 and \p length = 1, meaning
      * there is no NHWC layout and the MemoryPlane can be wrapped **anywhere**.
@@ -255,7 +255,7 @@ public:
         /// with different size, like NHWC = NHW(C1+C2):
         /// - MemoryPlane#1: \p size = C1 and \p stride = C=C1+C2
         /// - MemoryPlane#2: \p size = C2 and \p stride = C=C1+C2
-        ///                  (with an additionnal relative offset of +C1)
+        ///                  (with an additional relative offset of +C1)
         /// By default, \p stride = \p size, \p count = 1 and \p length = 1, meaning
         /// there is no NHWC layout and the MemoryPlane can be wrapped **anywhere**.
         /// In this case, \p size is the total size of the MemoryPlane (H*W*C, in words).
@@ -350,7 +350,7 @@ public:
                            unsigned int length = 1,
                            unsigned int count = 1);
     /// Generate a new MemoryPlane directly following an existing MemoryPlane
-    /// memPlane with an additionnal offset extraOffset
+    /// memPlane with an additional offset extraOffset
     MemoryPlane reallocate(const MemoryPlane& memPlane,
                            unsigned int extraOffset,
                            unsigned int size,
@@ -375,7 +375,7 @@ public:
                             unsigned int length = 1,
                             unsigned int count = 1);
     /// Generate a new MemoryPlane directly following an existing MemoryPlane
-    /// memPlane with an additionnal offset extraOffset
+    /// memPlane with an additional offset extraOffset
     unsigned int reallocate(const MemoryPlane& memPlane,
                             const std::shared_ptr<Node>& node,
                             unsigned int extraOffset,
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 2d03f4e8b8d5ce9c74f1d140a2e13317decc8dac..6e08885fc3f8966fba48be1c55a6965ac9e70775 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -190,7 +190,7 @@ protected:
 
     /**
      * @brief Generate an initial base scheduling for the GraphView.
-     * The scheduling is entirely sequential and garanteed to be valid w.r.t.
+     * The scheduling is entirely sequential and guaranteed to be valid w.r.t.
      * each node producer-consumer model.
      * @return Vector of pointers to `StaticSchedulingElement` representing the base schedule.
     */
diff --git a/include/aidge/stimuli/Stimulus.hpp b/include/aidge/stimuli/Stimulus.hpp
index 3def790b65f441c567e5d43150f465233cb64557..af21d7912314c3eea1217811ae3e2b2da47a7a66 100644
--- a/include/aidge/stimuli/Stimulus.hpp
+++ b/include/aidge/stimuli/Stimulus.hpp
@@ -23,8 +23,8 @@
 
 namespace Aidge {
 /**
- * @brief Stimulus. A class wrapping a data sample. Stimulus has two functioning modes. The first mode enables to load data samples from a dataPath and optionnaly store the data in-memory. The second mode enables to store a data sample that was already loaded in memory.
- * @details When Stimulus is used in the first mode, the loading function is determined automaticaly based on the backend and the file extension.
+ * @brief Stimulus. A class wrapping a data sample. Stimulus has two functioning modes. The first mode enables to load data samples from a dataPath and optionally store the data in-memory. The second mode enables to store a data sample that was already loaded in memory.
+ * @details When Stimulus is used in the first mode, the loading function is determined automatically based on the backend and the file extension.
  */
 class Stimulus : public Registrable<Stimulus, std::tuple<std::string, std::string>, std::function<std::unique_ptr<StimulusImpl>(const std::string&)>> {
 private:
diff --git a/include/aidge/utils/ArrayHelpers.hpp b/include/aidge/utils/ArrayHelpers.hpp
index 6648c654d28197dc018b94e8fa300366af52db4a..45a4c3c37da59e369bae2bb7e934c54bd844088d 100644
--- a/include/aidge/utils/ArrayHelpers.hpp
+++ b/include/aidge/utils/ArrayHelpers.hpp
@@ -85,7 +85,7 @@ constexpr std::array<T, N + 1> append(T t, std::array<T, N> a, std::index_sequen
  * @details append({1,2,7}, 3) -> {1,2,7,3}
  *
  * @tparam T Data type.
- * @tparam N Number of elements in the initilial array.
+ * @tparam N Number of elements in the initial array.
  * @param a Initial array.
  * @param t Element to add.
  * @return constexpr std::array<T, N + 1>
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
index fd29bf4ce57ac94e0860172d2d1c15dc40f15ae0..20f93398df0e1453bad95be22479a37451665ee7 100644
--- a/include/aidge/utils/Attributes.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -14,9 +14,6 @@
 
 #include <string>
 #include <set>
-#include <map>
-
-#include "aidge/utils/future_std/any.hpp"
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
@@ -66,15 +63,15 @@ public:
     */
     virtual std::set<std::string> getAttrsName() const = 0;
 
-    virtual std::map<std::string, future_std::any> getAttrs() const = 0;
-
 #ifdef PYBIND
-    /* Bindable get function, does not recquire any templating.
+    virtual bool hasAttrPy(const std::string& name) const = 0;
+
+    /* Bindable get function, does not require any templating.
     *  This is thanks to py::object which allow the function to
     *  be agnostic from its return type.
     */
     virtual py::object getAttrPy(const std::string& name) const  = 0;
-    /* Bindable set function, does not recquire any templating.
+    /* Bindable set function, does not require any templating.
     *  This is thanks to py::object which allow the function to
     *  be agnostic from ``value`` type.
     */
@@ -87,7 +84,6 @@ public:
     virtual py::dict dict() const = 0;
 
 #endif
-
     virtual ~Attributes() {}
 };
 }
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 1b55d7afbf8263a77cf70752fc92f72ef5027904..8f6f5c7dea1d099e8061644d5ae034309ba4185a 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -18,7 +18,6 @@
 #include <typeinfo>
 #include <cassert>
 #include <string>
-#include <typeindex>
 
 #include "aidge/utils/future_std/any.hpp"
 #include "aidge/utils/Attributes.hpp"
@@ -39,34 +38,31 @@ namespace Aidge {
 ///\todo managing complex types or excluding non-trivial, non-aggregate types
 class DynamicAttributes : public Attributes {
 public:
-    DynamicAttributes() = default;
-    DynamicAttributes(const std::map<std::string, future_std::any>& attrs): mAttrs(attrs) {}
-
     /**
      * \brief Returning an Attribute identified by its name
      * \tparam T expected Attribute type
      * \param name Attribute name
      * \details assert if T is not the actual Attribute type or if the Attribute does not
      *  exist
-     * \note at() throws if the Attribute does not exist, using find to test for Attribute existance
+     * \note at() throws if the Attribute does not exist, using find to test for Attribute existence
      */
-    template<class T> T getAttr(const std::string& name) const
+    template<class T> const T& getAttr(const std::string& name) const
     {
         const auto dot = name.find('.');
         if (dot == name.npos) {
-            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
-
-            const auto& attr = mAttrs.at(name);
 #ifdef PYBIND
-            if (attr.type() == typeid(py::object)) {
-                // Note: because of cast<T>(), this function cannot return a const reference!
-                return future_std::any_cast<const py::object&>(attr).cast<T>();
+            // If attribute does not exist in C++, it might have been created or modified in Python
+            auto it = mAttrs.find(name);
+            if (it == mAttrs.end()) {
+                auto itPy = mAttrsPy.find(name);
+                if (itPy != mAttrsPy.end()) {
+                    // Insert the attribute back in C++
+                    mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
+                }
             }
-            else
 #endif
-            {
-                return future_std::any_cast<const T&>(attr);
-            }
+
+            return future_std::any_cast<const T&>(mAttrs.at(name));
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -76,21 +72,9 @@ public:
     }
 
     template<class T> T& getAttr(const std::string& name) {
-        const auto dot = name.find('.');
-        if (dot == name.npos) {
-            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
-
-            auto& attr = mAttrs.at(name);
-#ifdef PYBIND
-            AIDGE_ASSERT(attr.type() != typeid(py::object), "getAttr(): cannot return a reference to a Python-defined attribute.");
-#endif
-            return future_std::any_cast<T&>(attr);
-        }
-        else {
-            const auto ns = name.substr(0, dot);
-            const auto nsName = name.substr(dot + 1);
-            return future_std::any_cast<DynamicAttributes&>(mAttrs.at(ns)).getAttr<T>(nsName);
-        }
+        // Scott Meyers' solution to avoid code duplication
+        return const_cast<T&>(
+            static_cast<const DynamicAttributes&>(*this).getAttr<T>(name));
     }
 
     ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
@@ -101,10 +85,17 @@ public:
     {
         const auto dot = name.find('.');
         if (dot == name.npos) {
-            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
-
             const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
             AIDGE_ASSERT(res.second, "addAttr(): attribute \"{}\" already exists. Use setAttr() if this is expected.", name);
+
+#ifdef PYBIND
+            // We cannot handle Python object if the Python interpreter is not running
+            if (Py_IsInitialized()) {
+                // Keep a copy of the attribute in py::object that is updated every time
+                const auto& resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
+                AIDGE_ASSERT(resPy.second, "addAttr(): attribute \"{}\" already exists (added in Python). Use setAttr() if this is expected.", name);
+            }
+#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -122,11 +113,19 @@ public:
     {
         const auto dot = name.find('.');
         if (dot == name.npos) {
-            mAnyUtils.emplace(typeid(T), std::unique_ptr<AnyUtils<T>>(new AnyUtils<T>()));
-
             auto res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
             if (!res.second)
                 res.first->second = future_std::any(value);
+
+#ifdef PYBIND
+            // We cannot handle Python object if the Python interpreter is not running
+            if (Py_IsInitialized()) {
+                // Keep a copy of the attribute in py::object that is updated every time
+                auto resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
+                if (!resPy.second)
+                    resPy.first->second = std::move(py::cast(value));
+            }
+#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -140,6 +139,9 @@ public:
         const auto dot = name.find('.');
         if (dot == name.npos) {
             mAttrs.erase(name);
+#ifdef PYBIND
+            mAttrsPy.erase(name);
+#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -151,12 +153,41 @@ public:
 #ifdef PYBIND
     void addAttrPy(const std::string& name, py::object&& value)
     {
-        addAttr(name, std::move(value));
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            auto it = mAttrs.find(name);
+            AIDGE_ASSERT(it == mAttrs.end(), "add_attr(): attribute \"{}\" already exists (added in C++). Use set_attr() if this is expected.", name);
+
+            const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
+            AIDGE_ASSERT(res.second, "add_attr(): attribute \"{}\" already exists. Use set_attr() if this is expected.", name);
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            const auto& res = mAttrs.emplace(std::make_pair(ns, DynamicAttributes()));
+
+            future_std::any_cast<DynamicAttributes&>(res.first->second).addAttrPy(nsName, std::move(value));
+        }
     }
 
     void setAttrPy(const std::string& name, py::object&& value) override final
     {
-        setAttr(name, std::move(value));
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
+            if (!resPy.second)
+                resPy.first->second = std::move(value);
+
+            // Force getAttr() to take attribute value from mAttrsPy and update mAttrs
+            mAttrs.erase(name);
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            const auto& res = mAttrs.emplace(std::make_pair(ns, DynamicAttributes()));
+
+            future_std::any_cast<DynamicAttributes&>(res.first->second).setAttrPy(nsName, std::move(value));
+        }
     }
 
     py::dict dict() const override {
@@ -165,16 +196,9 @@ public:
             if (elt.second.type() == typeid(DynamicAttributes)) {
                 attributes[elt.first.c_str()] = future_std::any_cast<const DynamicAttributes&>(elt.second).dict();
             }
-            else {
-                // At this point, not every attribute may be known to mAnyUtils
-                const auto anyUtilsIt = mAnyUtils.find(elt.second.type());
-                if (anyUtilsIt != mAnyUtils.end()) {
-                    attributes[elt.first.c_str()] = anyUtilsIt->second->cast(elt.second);
-                }
-                else {
-                    attributes[elt.first.c_str()] = "???";
-                }
-            }
+        }
+        for (const auto& elt : mAttrsPy) {
+            attributes[elt.first.c_str()] = elt.second;
         }
         return attributes;
     }
@@ -197,7 +221,12 @@ public:
     bool hasAttr(const std::string& name) const override final {
         const auto dot = name.find('.');
         if (dot == name.npos) {
+#ifdef PYBIND
+            return (mAttrs.find(name) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
+
+#else
             return (mAttrs.find(name) != mAttrs.cend());
+#endif
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -212,22 +241,45 @@ public:
         }
     }
 
+#ifdef PYBIND
+    bool hasAttrPy(const std::string& name) const override final {
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            // Attributes might have been created in Python, the second condition is necessary.
+            return (mAttrs.find(name) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto it = mAttrs.find(ns);
+            if (it != mAttrs.cend()) {
+                const auto nsName = name.substr(dot + 1);
+                return future_std::any_cast<const DynamicAttributes&>(it->second).hasAttrPy(nsName);
+            }
+            else {
+                return false;
+            }
+        }
+    }
+#endif
+
     std::string getAttrType(const std::string& name) const override final {
         // In order to remain consistent between C++ and Python, with or without PyBind, the name of the type is:
         // - C-style for C++ created attributes
         // - Python-style for Python created attributes
         const auto dot = name.find('.');
         if (dot == name.npos) {
-            const auto& attr = mAttrs.at(name);
 #ifdef PYBIND
-            if (attr.type() == typeid(py::object)) {
-                return std::string(Py_TYPE(future_std::any_cast<const py::object&>(attr).ptr())->tp_name);
+            // If attribute does not exist in C++, it might have been created in Python
+            auto it = mAttrs.find(name);
+            if (it == mAttrs.end()) {
+                auto itPy = mAttrsPy.find(name);
+                if (itPy != mAttrsPy.end()) {
+                    return std::string(Py_TYPE(itPy->second.ptr())->tp_name);
+                }
             }
-            else
 #endif
-            {
-                return attr.type().name();
-            }
+
+            return mAttrs.at(name).type().name();
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -240,20 +292,33 @@ public:
         std::set<std::string> attrsName;
         for(auto const& it: mAttrs)
             attrsName.insert(it.first);
+#ifdef PYBIND
+        // Attributes might have been created in Python
+        for(auto const& it: mAttrsPy)
+            attrsName.insert(it.first);
+#endif
         return attrsName;
     }
 
 #ifdef PYBIND
     /**
      * @detail See https://github.com/pybind/pybind11/issues/1590 as to why a
-     * generic type caster for std::any is not feasable.
-     * The strategy here is to store a cast() function for each attribute type ever used.
+     * generic type caster for std::any is not feasible.
+     * The strategy here is to keep a copy of each attribute in py::object that is updated every time.
     */
     inline py::object getAttrPy(const std::string& name) const override final {
         const auto dot = name.find('.');
         if (dot == name.npos) {
-            const auto& attr = mAttrs.at(name);
-            return mAnyUtils.at(attr.type())->cast(attr);
+            auto itPy = mAttrsPy.find(name);
+            if (itPy == mAttrsPy.end()) {
+                // Attribute may be a namespace
+                auto it = mAttrs.find(name);
+                AIDGE_ASSERT(it != mAttrs.end() && it->second.type() == typeid(DynamicAttributes), "get_attr(): attribute \"{}\" not found", name);
+                return py::cast(future_std::any_cast<const DynamicAttributes&>(it->second));
+            }
+            else {
+                return itPy->second;
+            }
         }
         else {
             const auto ns = name.substr(0, dot);
@@ -263,150 +328,25 @@ public:
     };
 #endif
 
-    future_std::any getAny(const std::string& name) const
-    {
-        const auto dot = name.find('.');
-        if (dot == name.npos) {
-            return mAttrs.at(name);
-        }
-        else {
-            const auto ns = name.substr(0, dot);
-            const auto nsName = name.substr(dot + 1);
-            return future_std::any_cast<const DynamicAttributes&>(mAttrs.at(ns)).getAny(nsName);
-        }
-    }
-
-    std::map<std::string, future_std::any> getAttrs() const override {
-        return mAttrs;
-    }
-
-    virtual ~DynamicAttributes() {
-#ifdef PYBIND
-        if (!Py_IsInitialized()) {
-            // Resets the internal pointer of py::object to nullptr without decreasing the object's reference count.
-            // At this point, the Python interpreter may have exited (it is the case if the current DynamicAttribute being destroyed is static),
-            // in which case py:object has already being destroyed despite the reference counting being > 0.
-            // See https://github.com/pybind/pybind11/issues/1598
-            for (auto& attr : mAttrs) {
-                if (attr.second.type() == typeid(py::object)) {
-                    future_std::any_cast<py::object&>(attr.second).release();
-                }
-            }
-        }
-#endif
-    }
-
-    friend bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs);
-    friend struct std::hash<DynamicAttributes>;
+    virtual ~DynamicAttributes() {}
 
 private:
-    std::map<std::string, future_std::any> mAttrs;
-
-public:
-    struct AnyUtils_ {
 #ifdef PYBIND
-        virtual py::object cast(const future_std::any& attr) const = 0;
-#endif
-        virtual bool compare(const future_std::any&, const future_std::any&) const = 0;
-        virtual size_t hash(const future_std::any&) const = 0;
-        virtual ~AnyUtils_() = default;
-    };
-
-    template <class T>
-    struct AnyUtils : public AnyUtils_ {
-#ifdef PYBIND
-        py::object cast(const future_std::any& attr) const override final {
-            return py::cast(future_std::any_cast<const T&>(attr));
-        }
-#endif
-
-        bool compare(const future_std::any& lhs, const future_std::any& rhs) const override final {
-#ifdef PYBIND
-            if (lhs.type() == typeid(py::object) && rhs.type() != typeid(py::object)) {
-                return (future_std::any_cast<py::object>(lhs).cast<T>() < future_std::any_cast<T>(rhs));
-            }
-            else if (lhs.type() != typeid(py::object) && rhs.type() == typeid(py::object)) {
-                return (future_std::any_cast<T>(lhs) < future_std::any_cast<py::object>(rhs).cast<T>());
-            }
-            else
+    // Stores C++ attributes (copy) and Python-only attributes
+    // Code should be compiled with -fvisibility=hidden
+    // See https://pybind11.readthedocs.io/en/stable/faq.html:
+    // “‘SomeClass’ declared with greater visibility than the type of its
+    // field ‘SomeClass::member’ [-Wattributes]”
+    // This map will only be populated if Python interpreter is running
+    std::map<std::string, py::object> mAttrsPy;
+    // Stores C++ attributes only
+    // mutable because it may be updated in getAttr() from Python
+    mutable std::map<std::string, future_std::any> mAttrs;
+#else
+    std::map<std::string, future_std::any> mAttrs;
 #endif
-            {
-                return (future_std::any_cast<T>(lhs) < future_std::any_cast<T>(rhs));
-            }
-        }
-
-        size_t hash(const future_std::any& attr) const override final {
-            return std::hash<T>()(future_std::any_cast<T>(attr));
-        }
-    };
-
-    // Stores typed utils functions for each attribute type ever used
-    static std::map<std::type_index, std::unique_ptr<AnyUtils_>> mAnyUtils;
 };
 
-template<> void DynamicAttributes::setAttr<future_std::any>(const std::string& name, const future_std::any& value);
-
-#ifdef PYBIND
-template <>
-struct DynamicAttributes::AnyUtils<py::object> : public DynamicAttributes::AnyUtils_ {
-    py::object cast(const future_std::any& attr) const override {
-        return future_std::any_cast<const py::object&>(attr);
-    }
-
-    bool compare(const future_std::any& lhs, const future_std::any& rhs) const override {
-        return (future_std::any_cast<py::object>(lhs) < future_std::any_cast<py::object>(rhs));
-    }
-
-    size_t hash(const future_std::any& attr) const override final {
-        // Here we are mixing Python and C++ hashes... if both are
-        // well implemented, this should not increase the collision 
-        // probability for the same number of stored hashes.
-        return py::hash(future_std::any_cast<py::object>(attr));
-    }
-};
-#endif
-
-inline bool operator<(const DynamicAttributes& lhs, const DynamicAttributes& rhs) {
-    return (lhs.mAttrs < rhs.mAttrs);
-}
-
-// Combine the hashes (boost-like hash combining, see boost::hash_combine())
-inline void hash_combine(std::size_t& seed, const std::size_t& value) {
-    seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2);
-}
-}
-
-namespace std {
-    // Make DynamicAttributes hashable so that is can be stored in hash-based containers.
-    // This is particularly useful in Python since set() and dict() are hash-based.
-    template <>
-    struct hash<Aidge::DynamicAttributes> {
-        size_t operator()(const Aidge::DynamicAttributes& attrs) const {
-            std::size_t seed = 0;
-            for (const auto& pair : attrs.mAttrs) {
-                Aidge::hash_combine(seed, std::hash<std::string>()(pair.first));
-                Aidge::hash_combine(seed, Aidge::DynamicAttributes::mAnyUtils.at(pair.second.type())->hash(pair.second));
-            }
-            return seed;
-        }
-    };
-
-    // General specialization of std::hash for any container that has iterators (e.g., std::vector, std::list, std::set)
-    template <template <typename...> class Container, typename T, typename... Args>
-    struct hash<Container<T, Args...>> {
-        std::size_t operator()(const Container<T, Args...>& iterable) const {
-            std::size_t seed = 0;
-            for (const auto& v : iterable) {
-                // Recursively hash the value pointed by the iterator
-                Aidge::hash_combine(seed, std::hash<T>()(v));
-            }
-            return seed;
-        }
-    };
-}
-
-namespace future_std {
-bool operator<(const future_std::any& lhs, const future_std::any& rhs);
 }
 
 #endif /* AIDGE_CORE_UTILS_DYNAMICATTRIBUTES_H_ */
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index cb9348dc24d1ac4c10b090e3676fabea2035a35b..6914d45109099a81d46a2111ffbdbae8d0f5d7ee 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -23,7 +23,7 @@
 
 namespace Aidge {
 /**
- * Helper to define a context anywhere, hidding the scoped variable name
+ * Helper to define a context anywhere, hiding the scoped variable name
  * which has no relevance.
 */
 #define AIDGE_LOG_CONTEXT(...) const Log::Context logContext_##__LINE__(__VA_ARGS__)
@@ -68,7 +68,7 @@ public:
     /**
      * Detailed messages for debugging purposes, providing information helpful
      * for developers to trace and identify issues.
-     * Detailed insights of what is appening in an operation, not useful for the
+     * Detailed insights of what is happening in an operation, not useful for the
      * end-user. The operation is performed nominally.
      * @note This level is disabled at compile time for Release, therefore
      * inducing no runtime overhead for Release.
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 0468ae2616997c306bbd475fe6eb73cc033b0bcc..28dab05f80a64f12a59dc1f684652f66a96dc95f 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -14,7 +14,7 @@
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
-#include <pybind11/stl.h> // declare_registrable key can recquire stl
+#include <pybind11/stl.h> // declare_registrable key can require stl
 #include <pybind11/functional.h>// declare_registrable allow binding of lambda fn
 
 #endif
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 636863e292eeb677055dea379441ce422a6c90d8..439d2c638731b40bec0696a73b62b99e3bfddd41 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -53,7 +53,7 @@ public:
 */
 
     // Constructor for Attributes initialization.
-    // Compile-time garantee that every attribute is initialized.
+    // Compile-time guarantee that every attribute is initialized.
     template <ATTRS_ENUM ...attrsEnum> // non-type attribute pack
     constexpr StaticAttributes(const attr<attrsEnum>&&... attrs) {
         // Check number of attrs consistency
@@ -188,7 +188,7 @@ public:
     //////////////////////////////////////
     ///     Generic Attributes API
     //////////////////////////////////////
-    // Runtime existance check with name
+    // Runtime existence check with name
     bool hasAttr(const std::string& name) const override final {
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
             if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
diff --git a/include/aidge/utils/TensorUtils.hpp b/include/aidge/utils/TensorUtils.hpp
index 1bfe0929bf67bb0c6d3b893f3dbaf6993dcfd6ff..d7bfe68e1d4fff1565c575639e2259426909cd52 100644
--- a/include/aidge/utils/TensorUtils.hpp
+++ b/include/aidge/utils/TensorUtils.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
  * @tparam T should correspond to the type of the tensor, define the type of the absolute and relative error
  * @param t1  first :cpp:class:`Aidge::Tensor` to test
  * @param t2  second :cpp:class:`Aidge::Tensor` to test
- * @param relative relative difference allowed (should be betwen 0 and 1)
+ * @param relative relative difference allowed (should be between 0 and 1)
  * @param absolute absolute error allowed (shoulmd be positive)
  * @return true if both tensor are approximately equal and have the datatype, shape. Else return false
  */
diff --git a/include/aidge/utils/future_std/any.hpp b/include/aidge/utils/future_std/any.hpp
index 8d9bfe28d0497dc12c59aaed68a23d3a9563815e..4932417ef60bb59e66c105c2b3ff0bc0ad9fd3f7 100644
--- a/include/aidge/utils/future_std/any.hpp
+++ b/include/aidge/utils/future_std/any.hpp
@@ -229,14 +229,14 @@ private:
 
 
         /**
-         * Copies the **inner** content of the src union into the yet unitialized dest union.
+         * Copies the **inner** content of the src union into the yet uninitialized dest union.
          * As such, both inner objects will have the same state, but on separate memory locations.
          */
         void(*copy)(const storage_union& src, storage_union& dest);
 
 
         /**
-         * Moves the storage from src to the yet unitialized dest union.
+         * Moves the storage from src to the yet uninitialized dest union.
          * The state of src after this call is unspecified, caller must ensure not to use src anymore.
          */
         void(*move)(storage_union& src, storage_union& dest) noexcept;
@@ -376,7 +376,7 @@ protected:
     /**
      * Checks if two type infos are the same.
      * If ANY_IMPL_FAST_TYPE_INFO_COMPARE is defined, checks only the address of the
-     * type infos, otherwise does an actual comparision. Checking addresses is
+     * type infos, otherwise does an actual comparison. Checking addresses is
      * only a valid approach when there's no interaction with outside sources
      * (other shared libraries and such).
      */
diff --git a/include/aidge/utils/future_std/expected.hpp b/include/aidge/utils/future_std/expected.hpp
index c271d0e8d8066c0bcd0358f28f8bcd711a8b6ba0..ea29c811e68c20565ec28ab7f0237aac92c7ac57 100644
--- a/include/aidge/utils/future_std/expected.hpp
+++ b/include/aidge/utils/future_std/expected.hpp
@@ -7,7 +7,7 @@
 // Distributed under the Boost Software License, Version 1.0.
 // (See copy at http://www.boost.org/LICENSE_1_0.txt)
 //
-// expected lite is based on:
+// expected lite is based on:
 //   A proposal to add a utility class to represent expected monad
 //   by Vicente J. Botet Escriba and Pierre Talbot. http:://wg21.link/p0323
 
@@ -96,7 +96,7 @@
 # endif
 #endif
 
-// at default use SEH with MSVC for no C++ exceptions
+// at default use SEH with MSVC for no C++ exceptions
 
 #ifndef  nsel_CONFIG_NO_EXCEPTIONS_SEH
 # define nsel_CONFIG_NO_EXCEPTIONS_SEH  ( nsel_CONFIG_NO_EXCEPTIONS && _MSC_VER )
@@ -1577,12 +1577,12 @@ private:
 
 #endif // nsel_CONFIG_NO_EXCEPTIONS
 
-/// x.x.8 unexpect tag, in_place_unexpected tag: construct an error
+/// x.x.8 unexpect tag, in_place_unexpected tag: construct an error
 
 struct unexpect_t{};
 using in_place_unexpected_t = unexpect_t;
 
-nsel_inline17 constexpr unexpect_t unexpect{};
+nsel_inline17 constexpr unexpect_t unexpect{};
 nsel_inline17 constexpr unexpect_t in_place_unexpected{};
 
 /// class error_traits
@@ -1965,7 +1965,7 @@ public:
     // x.x.4.2 destructor
 
     // TODO: ~expected: triviality
-    // Effects: If T is not cv void and is_trivially_destructible_v<T> is false and bool(*this), calls val.~T(). If is_trivially_destructible_v<E> is false and !bool(*this), calls unexpect.~unexpected<E>().
+    // Effects: If T is not cv void and is_trivially_destructible_v<T> is false and bool(*this), calls val.~T(). If is_trivially_destructible_v<E> is false and !bool(*this), calls unexpect.~unexpected<E>().
     // Remarks: If either T is cv void or is_trivially_destructible_v<T> is true, and is_trivially_destructible_v<E> is true, then this destructor shall be a trivial destructor.
 
     ~expected()
@@ -2016,7 +2016,7 @@ public:
     >
     expected & operator=( future_std::unexpected_type<G> const & error )
     {
-        expected( unexpect, error.value() ).swap( *this );
+        expected( unexpect, error.value() ).swap( *this );
         return *this;
     }
 
@@ -2029,7 +2029,7 @@ public:
     >
     expected & operator=( future_std::unexpected_type<G> && error )
     {
-        expected( unexpect, std::move( error.value() ) ).swap( *this );
+        expected( unexpect, std::move( error.value() ) ).swap( *this );
         return *this;
     }
 
@@ -2267,7 +2267,7 @@ public:
     {
         return has_value()
             ? detail::invoke_result_nocvref_t< F, value_type & >( detail::invoke( std::forward< F >( f ), value() ) )
-            : detail::invoke_result_nocvref_t< F, value_type & >( unexpect, error() );
+            : detail::invoke_result_nocvref_t< F, value_type & >( unexpect, error() );
     }
 
     template<typename F
@@ -2281,7 +2281,7 @@ public:
     {
         return has_value()
             ? detail::invoke_result_nocvref_t< F, const value_type & >( detail::invoke( std::forward< F >( f ), value() ) )
-            : detail::invoke_result_nocvref_t< F, const value_type & >( unexpect, error() );
+            : detail::invoke_result_nocvref_t< F, const value_type & >( unexpect, error() );
     }
 
 #if !nsel_COMPILER_GNUC_VERSION || nsel_COMPILER_GNUC_VERSION >= 490
@@ -2296,7 +2296,7 @@ public:
     {
         return has_value()
             ? detail::invoke_result_nocvref_t< F, value_type && >( detail::invoke( std::forward< F >( f ), std::move( value() ) ) )
-            : detail::invoke_result_nocvref_t< F, value_type && >( unexpect, std::move( error() ) );
+            : detail::invoke_result_nocvref_t< F, value_type && >( unexpect, std::move( error() ) );
     }
 
     template<typename F
@@ -2310,7 +2310,7 @@ public:
     {
         return has_value()
             ? detail::invoke_result_nocvref_t< F, const value_type && >( detail::invoke( std::forward< F >( f ), std::move( value() ) ) )
-            : detail::invoke_result_nocvref_t< F, const value_type && >( unexpect, std::move( error() ) );
+            : detail::invoke_result_nocvref_t< F, const value_type && >( unexpect, std::move( error() ) );
     }
 #endif
 
@@ -2823,7 +2823,7 @@ public:
     {
         return has_value()
             ? detail::invoke_result_nocvref_t< F >( detail::invoke( std::forward< F >( f ) ) )
-            : detail::invoke_result_nocvref_t< F >( unexpect, error() );
+            : detail::invoke_result_nocvref_t< F >( unexpect, error() );
     }
 
     template<typename F
@@ -2837,7 +2837,7 @@ public:
     {
         return has_value()
             ? detail::invoke_result_nocvref_t< F >( detail::invoke( std::forward< F >( f ) ) )
-            : detail::invoke_result_nocvref_t< F >( unexpect, error() );
+            : detail::invoke_result_nocvref_t< F >( unexpect, error() );
     }
 
 #if !nsel_COMPILER_GNUC_VERSION || nsel_COMPILER_GNUC_VERSION >= 490
@@ -2852,7 +2852,7 @@ public:
     {
         return has_value()
             ? detail::invoke_result_nocvref_t< F >( detail::invoke( std::forward< F >( f ) ) )
-            : detail::invoke_result_nocvref_t< F >( unexpect, std::move( error() ) );
+            : detail::invoke_result_nocvref_t< F >( unexpect, std::move( error() ) );
     }
 
     template<typename F
@@ -2866,7 +2866,7 @@ public:
     {
         return has_value()
             ? detail::invoke_result_nocvref_t< F >( detail::invoke( std::forward< F >( f ) ) )
-            : detail::invoke_result_nocvref_t< F >( unexpect, std::move( error() ) );
+            : detail::invoke_result_nocvref_t< F >( unexpect, std::move( error() ) );
     }
 #endif
 
diff --git a/include/aidge/utilsParsing/ParsingToken.hpp b/include/aidge/utilsParsing/ParsingToken.hpp
index e303a5eabe6f7710873468f8edc8f3e844f4175f..8a1a740bf9bd4596675ef2dbd3e30af7765ffaa8 100644
--- a/include/aidge/utilsParsing/ParsingToken.hpp
+++ b/include/aidge/utilsParsing/ParsingToken.hpp
@@ -16,7 +16,7 @@ namespace Aidge{
         /**
          * @brief Token container
          * @param type one of the token type
-         * @param lexeme String representing aditional information of the token
+         * @param lexeme String representing additional information of the token
          */
         ParsingToken(const EnumType type , const std::string lexeme ):mLexeme(lexeme),mType(type){}
 
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index b972c87dcda8f912ff40feef0001b95d5feac71e..c3c301ea87116c61e6af28f93af6bfd709709a5c 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -251,7 +251,7 @@ static T castToNativeType(const py::object val_obj) {
 }
 
 static void addScalarCtor(pyTensorClass& mTensor) {
-    // Contructor based on bare py::object in order to match either
+    // Constructor based on bare py::object in order to match either
     // python scalars (int, float) or numpy scalars (np.int32, np.int64, ...).
     // There is a merge request to support numpy scalars in pybind, through py::numpy_scalar<T>
     // though it is not merged: https://github.com/pybind/pybind11/pull/3544/.
@@ -550,7 +550,7 @@ void init_Tensor(py::module& m){
     //   - np.ndarray of a given np.dtype: it will create an equivalent tensor of dtype == np.dtype when supported
     //   - np.dtype scalar: it will create an equivalent scalar tensor of dtype == np.dtype when supported
     //
-    // In order to implement this, we provide several overloads which are carefully ordered in order to fullfil
+    // In order to implement this, we provide several overloads which are carefully ordered in order to fulfill
     // the above requirements.
     //
 
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 4b9d2ad545c47971b7c0dff029585bb4c9ae5638..60d80e783d2e7d2e50d5f832b3508bf065edb707 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -95,7 +95,7 @@ void init_GraphView(py::module& m) {
           :type to_other_node: Node
           :param from_out_node: Node inside the GraphView the new Node will be linked to (it will become a parent of the new Node). If the GraphView only has one output Node, then default to this Node.
           :type from_out_node: Node
-          :param from_tensor: Ouput Tensor ID of the already included Node. Default to 0.
+          :param from_tensor: Output Tensor ID of the already included Node. Default to 0.
           :type from_tensor: int
           :param to_tensor: Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning first available data input for the Node.
           :type to_tensor: int
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index 69a28960b57e6ba2ac8a699bf45ff09961fa4135..1f67b48a08d2fbe5db39436f599ecfc0f236268c 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -76,7 +76,7 @@ void init_Node(py::module& m) {
     :type other_node: :py:class: Node
     :param out_id: ID of the output of the current Node to connect to the other Node. (If Node has 1 output max ID is 0). Default to 0.
     :type out_id: int
-    :param other_in_id: ID of the input of the other Node to connect to the current Node (If the node is a Mul op it has 2 input then Max ID is 1).Default to the first avaible data input.
+    :param other_in_id: ID of the input of the other Node to connect to the current Node (If the node is a Mul op it has 2 input then Max ID is 1).Default to the first available data input.
     :type other_in_id: int
     )mydelimiter")
 
@@ -128,7 +128,7 @@ void init_Node(py::module& m) {
     R"mydelimiter(
     Get, for each output of the Node, a list of the children Node and the associated input index connected to it.
 
-    :return: List of a list of connections. When an outut is not linked to any child,  its list a empty.
+    :return: List of a list of connections. When an output is not linked to any child, its list is empty.
     :rtype: list[list[tuple[Node, int]]]
     )mydelimiter")
 
@@ -187,7 +187,7 @@ void init_Node(py::module& m) {
             for (const auto &arg : args) {
                 // Check if the argument is an instance of Connector
                 if (pybind11::isinstance<Connector>(arg)) {
-                    // Convert Python object to C++ object adn push it ot vector
+                    // Convert Python object to C++ object and push it to vector
                     connectors.push_back(arg.cast<Connector>());
                 }
                 else if (arg.is(py::none())) {
diff --git a/python_binding/operator/pybind_ConstantOfShape.cpp b/python_binding/operator/pybind_ConstantOfShape.cpp
index b0d5ef2ef78380422ca1a137608f5289fa519aed..189337a384d55c91f6aceeb97c530ed92ef7b4d0 100644
--- a/python_binding/operator/pybind_ConstantOfShape.cpp
+++ b/python_binding/operator/pybind_ConstantOfShape.cpp
@@ -24,7 +24,7 @@ namespace Aidge {
 void init_ConstantOfShape(py::module &m) {
   py::class_<ConstantOfShape_Op, std::shared_ptr<ConstantOfShape_Op>, OperatorTensor>(
       m, "ConstantOfShapeOp", py::multiple_inheritance())
-      // Here we bind the methods of the Unsqueeze_Op that wil want to access
+      // Here we bind the methods of the Unsqueeze_Op that will want to access
       .def("get_inputs_name", &ConstantOfShape_Op::getInputsName)
       .def("get_outputs_name", &ConstantOfShape_Op::getOutputsName)
       .def("value", &ConstantOfShape_Op::value);
diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
index 40c179c4064f07896113732a7e3c32db5f19c060..b61cb40cedbb5bfbc197c401454f205c737bc6ee 100644
--- a/python_binding/operator/pybind_Unsqueeze.cpp
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -28,7 +28,7 @@ void init_Unsqueeze(py::module &m) {
 						with r = input_tensor.nbDims() + len(axes)
 		:type axes : :py:class: List[Int]
 		)mydelimiter")
-      // Here we bind the methods of the Unsqueeze_Op that wil want to access
+      // Here we bind the methods of the Unsqueeze_Op that will want to access
       .def("get_inputs_name", &Unsqueeze_Op::getInputsName)
       .def("get_outputs_name", &Unsqueeze_Op::getOutputsName)
       .def("axes", &Unsqueeze_Op::axes);
diff --git a/python_binding/utils/pybind_Log.cpp b/python_binding/utils/pybind_Log.cpp
index ca8d1f33086fb5093c76826e5a2f53df873badf5..aa42c6605217f63b6871d1a3475b9612097577cd 100644
--- a/python_binding/utils/pybind_Log.cpp
+++ b/python_binding/utils/pybind_Log.cpp
@@ -17,7 +17,7 @@ void init_Log(py::module& m){
           R"mydelimiter(
           Detailed messages for debugging purposes, providing information helpful
           for developers to trace and identify issues.
-          Detailed insights of what is appening in an operation, not useful for the
+          Detailed insights of what is happening in an operation, not useful for the
           end-user. The operation is performed nominally.
           Note: This level is disabled at compile time for Release, therefore
           inducing no runtime overhead for Release.
diff --git a/python_binding/utils/pybind_TensorUtils.cpp b/python_binding/utils/pybind_TensorUtils.cpp
index d82db0355ad641062ec89b1b331c74ccfde4c0b6..15fabdcb700ed5ca15d3d60952f55df488d41bc3 100644
--- a/python_binding/utils/pybind_TensorUtils.cpp
+++ b/python_binding/utils/pybind_TensorUtils.cpp
@@ -41,7 +41,7 @@ void addTensorUtilsFunction(py::module &m){
         :type t1: :py:class:`aidge_core.Tensor`
         :param t2: second tensor to test
         :type t2: :py:class:`aidge_core.Tensor`
-        :param relative: relative difference allowed (should be betwen 0 and 1)
+        :param relative: relative difference allowed (should be between 0 and 1)
         :type relative: float
         :param absolute: absolute error allowed (shoulmd be positive)
         :type absolute: float
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index e1f25e86827e81b26436876dce1b98fe0cda80b8..48597e2b6fa95ff3195ed2eea6b8c39dcef86771 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -103,7 +103,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
         std::string givenName =
             (node_ptr->name().empty())
                 ? "<em>" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + "</em>"
-                : "\"" + node_ptr->name() + "<br/><sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\"";
+                : "\"" + node_ptr->name() + "\\n<sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\"";
 
         std::string nodeCls = "";
         if (node_ptr->type() == "Producer") {
@@ -144,31 +144,27 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
       }
       IOIndex_t outputIdx = 0;
       for (const auto& childs : node_ptr->getOrderedChildren()) {
-        // Keep only unique childs in order to avoid duplicating connections
-        const auto uniqueChilds = std::set<NodePtr>(childs.begin(), childs.end());
-        for (const auto& child : uniqueChilds) {
+        for (const auto& child : childs) {
           if (child != nullptr) {
             IOIndex_t inputIdx = 0;
             for (auto parent : child->inputs()) {
               if (parent.first == node_ptr && parent.second == outputIdx) {
                 // Add-on to display the operator's output dimensions
                 std::string dims = "";
-                std::string dtype = "";
                 const auto op = std::dynamic_pointer_cast<OperatorTensor>(node_ptr->getOperator());
                 if (op && !op->getOutput(outputIdx)->undefined()) {
                   dims += " " + fmt::format("{}", op->getOutput(outputIdx)->dims());
-                  dtype += "\n" + fmt::format("{}", op->getOutput(outputIdx)->dataType());
                 }
 
                 if (mNodes.find(child) != mNodes.end()) {
-                  fmt::print(fp.get(), "{}_{}-->|\"{}{}{}&rarr;{}\"|{}_{}\n", node_ptr->type(), namePtrTable.at(node_ptr),
-                              outputIdx, dims, dtype, inputIdx, child->type(), namePtrTable.at(child));
+                  fmt::print(fp.get(), "{}_{}-->|\"{}{}&rarr;{}\"|{}_{}\n", node_ptr->type(), namePtrTable.at(node_ptr),
+                              outputIdx, dims, inputIdx, child->type(), namePtrTable.at(child));
                 }
                 else if (verbose) {
-                  fmt::print(fp.get(), "{}_{}-->|\"{}{}{}&rarr;{}\"|{}:::externalCls\n", node_ptr->type(), namePtrTable.at(node_ptr),
-                              outputIdx, dims, dtype, inputIdx, static_cast<void*>(child.get()));
+                  fmt::print(fp.get(), "{}_{}-->|\"{}{}&rarr;{}\"|{}:::externalCls\n", node_ptr->type(), namePtrTable.at(node_ptr),
+                              outputIdx, dims, inputIdx, static_cast<void*>(child.get()));
                 }
-                // Do no break here because the same child can be connected to several inputs
+                break;
               }
               ++inputIdx;
             }
@@ -274,10 +270,7 @@ void Aidge::GraphView::setRootNode(NodePtr node) {
 std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::inputNodes() const {
     std::set<std::shared_ptr<Aidge::Node>> nodes;
     for (const auto& node : mInputNodes) {
-        // Do not include dummy inputs
-        if (node.first) {
-            nodes.insert(node.first);
-        }
+        nodes.insert(node.first);
     }
     return nodes;
 }
@@ -285,10 +278,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::inputNodes() const {
 std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::outputNodes() const {
     std::set<std::shared_ptr<Aidge::Node>> nodes;
     for (const auto& node : mOutputNodes) {
-        // Do not include dummy outputs
-        if (node.first) {
-            nodes.insert(node.first);
-        }
+        nodes.insert(node.first);
     }
     return nodes;
 }
@@ -351,7 +341,7 @@ Aidge::IOIndex_t Aidge::GraphView::getNbDataInputs() const {
   for (const std::shared_ptr<Node> &inNode : inputNodes()) {
     // We cannot simply add inNode->nbDataInputs(), as input nodes may already
     // have some inputs connected within the GraphView, which would therefore not
-    // constitue inputs (from outside) for the GraphView!
+    // constitute inputs (from outside) for the GraphView!
     const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> inputNodeinputs =
         inNode->dataInputs();
 
@@ -433,7 +423,7 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
     // remove current Data connections and use dummy inputs to propagate dimensions
     // setInputs
     // Link every tensor to the right pointer
-    // following parent - children informations
+    // following parent - children information
     if (!dims.empty()){
       Log::debug("forwardDims(): setting graph input dims ({} dims provided).", dims.size());
 
@@ -532,10 +522,7 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                 }
 
                 if (parentsForwarded && op->forwardDims(allowDataDependency)) {
-                    Log::debug("Dimensions forwarded for node {} (of type {})",
-                        nodePtr->name(), nodePtr->type());
-
-                    // Recompute everytime, even if it was already computed in a
+                    // Recompute every time, even if it was already computed in a
                     // previous call of forwardDims(), as the graph may have changed!
                     dimsForwarded.insert(nodePtr);
                     for (const auto& child : nodePtr->getChildren()) {
@@ -545,9 +532,7 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                     }
                 }
                 else {
-                    if (parentsForwarded) {
-                        Log::debug("Unable to forward dimensions for node {} (of type {})", nodePtr->name(), nodePtr->type());
-                    }
+                    Log::debug("Unable to forward dimensions for node {} (of type {}) yet", nodePtr->name(), nodePtr->type());
                     nextList.insert(nodePtr);
                 }
             }
@@ -629,7 +614,7 @@ void Aidge::GraphView::setInputId(Aidge::IOIndex_t /*inID*/,
 }
 
 void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnableParam) {
-  AIDGE_ASSERT(node != nullptr, "Trying to add non-existant node!");
+  AIDGE_ASSERT(node != nullptr, "Trying to add non-existent node!");
 
   // first node to be added to the graph is the root node by default
   if (mRootNode == nullptr) {
@@ -700,61 +685,6 @@ std::pair<std::vector<Aidge::NodePtr>, size_t> Aidge::GraphView::getRankedNodes(
   return std::make_pair(rankedNodes, orderUnicityLimit);
 }
 
-std::vector<Aidge::NodePtr> Aidge::GraphView::getOrderedNodes(bool reversed) const {
-    // We compute the order from a post-dfs walk on the reverse graph starting from
-    // ordered output nodes.
-    // Also, we walk the graph upward left to right in order
-    // to get a topological left-right order when possible.
-    // For the case where reversed is true, we walk the graph upward right to left
-    // and reverse the final order to get a post-dfs left-right order when possible.
-    std::vector<std::pair<NodePtr,std::pair<size_t, std::vector<NodePtr>>>> stack;
-    std::vector<NodePtr> reversePostDfs;
-    std::set<NodePtr> visited;
-    std::vector<NodePtr> outNodes(mNodes.size());
-    auto reverse_if_dfs = [reversed](auto &parents) {
-        if (reversed) std::reverse(parents.begin(), parents.end());
-    };
-    for (const auto& output : mOutputNodes) {
-            outNodes.push_back(output.first);
-    }
-    reverse_if_dfs(outNodes);
-    stack.push_back(std::make_pair(nullptr, std::make_pair(0, std::move(outNodes))));
-    while (!stack.empty()) {
-        auto node = stack.back().first;
-        auto& parentIdx = stack.back().second.first;
-        auto& parents = stack.back().second.second;
-        if (parentIdx == parents.size()) {
-            stack.pop_back();
-            if (node) {
-                reversePostDfs.push_back(node);
-            }
-        } else {
-            auto backEdgeIdx = reversed ? parents.size() - 1 - parentIdx: parentIdx;
-            auto isBackEdge = node != nullptr ? node->parentIsBackEdge(backEdgeIdx): false;
-            auto parent = parents[parentIdx++];
-            if (parent != nullptr && inView(parent) &&
-                visited.find(parent) == visited.end()) {
-                if (isBackEdge) {
-                    stack[0].second.second.push_back(parent);
-                } else {
-                    visited.insert(parent);
-                    auto next_parents = parent->getParents();
-                    reverse_if_dfs(next_parents);
-                    stack.push_back(std::make_pair(parent, std::make_pair(0, std::move(next_parents))));
-                }
-            }
-        }
-    }
-
-    if (reversePostDfs.size() != mNodes.size()) {
-        AIDGE_THROW_OR_ABORT(std::runtime_error,
-                             "Could not enumerate all nodes, set output nodes such that all graph nodes are connected.");
-    }
-
-    reverse_if_dfs(reversePostDfs);
-    return reversePostDfs;
-}
-
 std::map<Aidge::NodePtr, std::string> Aidge::GraphView::getRankedNodesName(const std::string& format, bool markNonUnicity) const {
   const auto rankedNodes = getRankedNodes();
   std::map<NodePtr, std::string> rankedNodesName;
@@ -821,7 +751,7 @@ bool Aidge::GraphView::add(std::set<std::shared_ptr<Node>> otherNodes, bool incl
     mRootNode = *noParentNodes.begin();
 
     if (noParentNodes.size() > 1) {
-      // If there is more than one, order unicity cannot be garanteed!
+      // If there is more than one, order unicity cannot be guaranteed!
       orderUnicity = false;
     }
 
@@ -924,7 +854,7 @@ void Aidge::GraphView::addChild(
   // assert input node is valid
   if (!toNode.first) {
     assert(toOtherView->inputNodes().size() == 1U &&
-           "If no intput node is provided, the other graph should have only "
+           "If no input node is provided, the other graph should have only "
            "one to make the choice explicit.");
     toNode.first = *(toOtherView->inputNodes().begin());
   } else {
@@ -1045,7 +975,7 @@ void Aidge::GraphView::remove(std::shared_ptr<Node> nodePtr, bool includeLearnab
 
 
 bool Aidge::GraphView::swap(Node & /*node*/, Node & /*otherNode*/) {
-  fmt::print("Swap() not implementated yet. Return false.\n");
+  fmt::print("Swap() not implemented yet. Return false.\n");
   return false;
 }
 
@@ -1476,7 +1406,7 @@ void Aidge::GraphView::updateInputsOutputsDelete(std::shared_ptr<Node> deletedNo
 std::shared_ptr<Aidge::GraphView> Aidge::GraphView::cloneCallback(NodePtr(*cloneNode)(NodePtr)) const {
   std::shared_ptr<GraphView> newGraph = std::make_shared<GraphView>(mName);
 
-  // Map for old node -> new node correspondance
+  // Map for old node -> new node correspondence
   std::map<NodePtr, NodePtr> oldToNewNodes;
 
   for (const std::shared_ptr<Node> &node_ptr : mNodes) {
diff --git a/src/graphRegex/GraphFsmInterpreter.cpp b/src/graphRegex/GraphFsmInterpreter.cpp
index 18b768c6567e64caf6841ed4a339f13fd16f69d6..a2f07129c468c737da022de8a6d1b093cdd08e39 100644
--- a/src/graphRegex/GraphFsmInterpreter.cpp
+++ b/src/graphRegex/GraphFsmInterpreter.cpp
@@ -145,7 +145,7 @@ std::shared_ptr<FsmGraph> GraphFsmInterpreter::qomF(std::shared_ptr<FsmGraph> fs
                         edge = FsmEdgeFactory::make(valid,start,FsmEdgeTypes::REF,mNodesCondition, lexem.str());
                     }else{
                         /*
-                        the sequencial quantify case 
+                        the sequential quantifier case
                         no reference to common 
                         */
                         edge = FsmEdgeFactory::make(valid,start,FsmEdgeTypes::EMPTY,mNodesCondition,"");
@@ -165,7 +165,7 @@ std::shared_ptr<FsmGraph> GraphFsmInterpreter::qomF(std::shared_ptr<FsmGraph> fs
 
 std::shared_ptr<FsmGraph> GraphFsmInterpreter::qzmF(std::shared_ptr<FsmGraph> fsm){
         /*
-        qomf and a bypass empty start to valide 
+        qomF plus a bypass empty edge from start to valid
         */
     fsm = qomF(fsm);
 
diff --git a/src/graphRegex/GraphLexer.cpp b/src/graphRegex/GraphLexer.cpp
index f504ad025940c88058ce5949259c464ae2cedfb6..05a23d02cdbfe072337ea2cc6ed92410e914257b 100644
--- a/src/graphRegex/GraphLexer.cpp
+++ b/src/graphRegex/GraphLexer.cpp
@@ -79,7 +79,7 @@ std::shared_ptr<ParsingToken<gRegexTokenTypes>> GraphLexer::getNextToken(void){
 
                 if(!std::regex_match(currentChars,keyRegex) && !std::regex_match(currentChars,cKeyRegex))
                 {
-                    currentChars.pop_back(); //the last char is the problemes
+                    currentChars.pop_back(); //the last char is the problem
                     break;
                 }
                 else if (std::regex_match(currentChars,cKeyRegex)){
@@ -89,7 +89,7 @@ std::shared_ptr<ParsingToken<gRegexTokenTypes>> GraphLexer::getNextToken(void){
                 if (mPosition < mRegularExpressions.length()) currentChars += mRegularExpressions[mPosition];
                 
             }
-            //we end the match 2 posibility 
+            //we end the match: 2 possibilities
             //we are at the end of the mConditionalExpressions and we need to ensure the match
             //we are not we can continu
             if (mPosition == mRegularExpressions.length()-1)
diff --git a/src/graphRegex/matchFsm/FsmEdge.cpp b/src/graphRegex/matchFsm/FsmEdge.cpp
index 638aad3bc3f5c94d5b20420ed8cc0799daa08cc0..170d5e69366d25ba19ea2f514cab6cd59b545ec0 100644
--- a/src/graphRegex/matchFsm/FsmEdge.cpp
+++ b/src/graphRegex/matchFsm/FsmEdge.cpp
@@ -141,7 +141,7 @@ FsmEdge::FsmEdge(std::shared_ptr<FsmNode>& source,std::shared_ptr<FsmNode>& dest
 {
     mNodeSource = source;
     mNodeDest   = dest;
-    // wen i make the edge I init the nodes
+    // when I make the edge I init the nodes
     // mNodeSource->addEdge(shared_from_this());
     // mNodeDest->addParent(mNodeSource);
 }
diff --git a/src/graphRegex/matchFsm/FsmGraph.cpp b/src/graphRegex/matchFsm/FsmGraph.cpp
index a56474e042cc44a68938b1d19e19a0c6841cb8cb..2ba11a4d26b933bdbfad5c91d127bfa2682473d4 100644
--- a/src/graphRegex/matchFsm/FsmGraph.cpp
+++ b/src/graphRegex/matchFsm/FsmGraph.cpp
@@ -33,7 +33,7 @@ FsmGraph::FsmGraph(const std::string query):mQuery(query){
         for(auto fsmContext : walks){
             allContextSee.push_back(fsmContext);
             //if we are in a valid st we save it
-            //it's one solution of the posible solution of the matching
+            //it's one solution of the possible solution of the matching
             if(fsmContext->isOnValidState()){
                 //not save 2 time the same end point
                 if(!std::any_of(allValidContext.begin(), allValidContext.end(),
@@ -45,7 +45,7 @@ FsmGraph::FsmGraph(const std::string query):mQuery(query){
 
             }
 
-            //dont test 2 time a fsmContext
+            //don't test the same fsmContext twice
             std::vector<std::shared_ptr<FsmRunTimeContext>> tmpNextWalks = fsmContext->getActState()->test(fsmContext);
             for(auto PotentialFsmContext : tmpNextWalks){
 
@@ -135,15 +135,15 @@ void FsmGraph::mergeOneStartOneValid(const std::shared_ptr<FsmGraph> fsmGraph){
     if (startNodes.size() != 1 || validNodes.size() != 1){
 
         std::ostringstream errorMessage;
-        errorMessage <<"mergeOneStartOneValid  start size: " << startNodes.size() << " valide size : " << validNodes.size()
-        <<" can only merge FSM 1 start 1 valide";
+        errorMessage <<"mergeOneStartOneValid  start size: " << startNodes.size() << " valid size : " << validNodes.size()
+        <<" can only merge FSM 1 start 1 valid";
         throw std::runtime_error(errorMessage.str());
     }
 
     unionG(fsmGraph);
-    //for loop useless but for future merge it's coudl be used
+    //for loop useless for now, but it could be used for future merges
     for(auto valid : validNodes){
-        valid->unValid();
+        valid->invalid();
         for(auto start : startNodes){
             start->unStart();
             _mergeNode(start,valid);
@@ -179,7 +179,7 @@ void FsmGraph::_mergeNode(std::shared_ptr<FsmNode> source,std::shared_ptr<FsmNod
     }
     nodes.clear();
 
-    //probagate source attribut
+    //propagate source attribute
     if(source->isValid()){
         dest->valid();
     }
diff --git a/src/graphRegex/matchFsm/FsmNode.cpp b/src/graphRegex/matchFsm/FsmNode.cpp
index 7bc4cf105b43a540bd0e9c686af35dd220611a09..6666d1a72a298f20bdae0eb1c51805e5ae133ba4 100644
--- a/src/graphRegex/matchFsm/FsmNode.cpp
+++ b/src/graphRegex/matchFsm/FsmNode.cpp
@@ -103,7 +103,7 @@ bool FsmNode::isValid(void){
 bool FsmNode::isStart(void){
     return mIsAStart;
 }
-void FsmNode::unValid(void){
+void FsmNode::invalid(void){
     mIsAValid =false;
 }
 void FsmNode::valid(void){
diff --git a/src/graphRegex/matchFsm/FsmRunTimeContext.cpp b/src/graphRegex/matchFsm/FsmRunTimeContext.cpp
index 7a09908e5629e299b6b264fbfaac97bdaf7fa316..89e7faf205ef515049de415a5f057db8a13105e9 100644
--- a/src/graphRegex/matchFsm/FsmRunTimeContext.cpp
+++ b/src/graphRegex/matchFsm/FsmRunTimeContext.cpp
@@ -74,7 +74,7 @@ bool FsmRunTimeContext::isAlreadyValid(NodePtr node){
 bool FsmRunTimeContext::areCompatible(std::shared_ptr<FsmRunTimeContext> fsmContext){
     /*
     see if 2 context can be merge
-    it need to have different  mValidNodes exept for common
+    they need to have different mValidNodes except for common
     and the same idx for the common
     */
 
@@ -192,9 +192,9 @@ std::set<NodePtr> FsmRunTimeContext::getValidNodes(void){
 
 std::set<NodePtr> FsmRunTimeContext::getValidNodesNoCommon(void){
     std::set<NodePtr> differenceSet;
-    std::set<NodePtr> valide = getValidNodes();
+    std::set<NodePtr> valid = getValidNodes();
     std::set<NodePtr> common = getCommonNodes();
-    std::set_difference(valide.begin(), valide.end(), common.begin(), common.end(),std::inserter(differenceSet, differenceSet.end()));
+    std::set_difference(valid.begin(), valid.end(), common.begin(), common.end(),std::inserter(differenceSet, differenceSet.end()));
     return differenceSet;
 }
 
diff --git a/src/nodeTester/ConditionalInterpreter.cpp b/src/nodeTester/ConditionalInterpreter.cpp
index f40e62305334f740057f88ef21cdab749d64bd99..5d10762d93d2bc0e92bf9d15bb24255bb7e51768 100644
--- a/src/nodeTester/ConditionalInterpreter.cpp
+++ b/src/nodeTester/ConditionalInterpreter.cpp
@@ -8,11 +8,11 @@ using namespace Aidge;
 //ConditionalRegisterFunction
 ///////////////////////////////
 
-     std::shared_ptr<ConditionalData> ConditionalRegisterFunction::run(const std::string key,std::vector< std::shared_ptr<ConditionalData>> & datas){
+     std::shared_ptr<ConditionalData> ConditionalRegisterFunction::run(const std::string key,std::vector< std::shared_ptr<ConditionalData>> & data){
 
         auto lambdaIt = mWlambda.find(key);
         if (lambdaIt != mWlambda.end()) {
-            return lambdaIt->second(datas);
+            return lambdaIt->second(data);
         }else {
             throw std::runtime_error("can not run Lambda due to invalid key: " + key);
         }
@@ -174,7 +174,7 @@ using namespace Aidge;
                         case ConditionalTokenTypes::RPAREN:
                         case ConditionalTokenTypes::STOP:
                         default:
-                            throw std::runtime_error("NODE TYPE NOT SUPORTED IN ConditionalInterpreter");
+                            throw std::runtime_error("NODE TYPE NOT SUPPORTED IN ConditionalInterpreter");
                     }
                 }catch(const std::exception& e){
                     std::ostringstream errorMessage;
@@ -188,7 +188,7 @@ using namespace Aidge;
 
 
     //////////////////////
-    //value convertor
+    //value converter
     /////////////////////
 
 
diff --git a/src/nodeTester/ConditionalLexer.cpp b/src/nodeTester/ConditionalLexer.cpp
index e70772fc1a5d6136fb56f5981d73bf6cb0622991..9cc480ab29e84a775d9e275fe6ba51dc11e6ea14 100644
--- a/src/nodeTester/ConditionalLexer.cpp
+++ b/src/nodeTester/ConditionalLexer.cpp
@@ -28,7 +28,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
             mPosition++;
             continue;
         }
-        //performe tokenisation, find a regex and make a new token
+        //perform tokenisation, find a regex and make a new token
         
         if (std::regex_match(currentChars,std::regex("\\&\\&")))// the AND TOKEN 
         {
@@ -86,7 +86,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
         //non const lent token
         /////
 
-        //LAMBDA, KEY , bool //the fuction TAG 
+        //LAMBDA, KEY , bool //the function TAG 
         else if (std::regex_match(currentChars,std::regex("[A-Za-z_]")))// the KEY TOKEN (a char next )
         {   
             //read all the key 
@@ -97,7 +97,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
             while ( mPosition < mConditionalExpressions.length()) {
                 if(!std::regex_match(currentChars,keyRegex) && !std::regex_match(currentChars,LambdaRegex))
                 {
-                    currentChars.pop_back(); //the last char is the problemes
+                    currentChars.pop_back(); //the last char is the problem
                     break;
                 }
                 else if (std::regex_match(currentChars,LambdaRegex)){
@@ -107,7 +107,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
                 if (mPosition < mConditionalExpressions.length()) currentChars += mConditionalExpressions[mPosition];
                 //currentChars += mConditionalExpressions[mPosition];
             }
-            //we end the match 2 posibility 
+            //we end the match: 2 possibilities
             //we are at the end of the mConditionalExpressions and we need to ensure the match
             //we are not we can continu
             if (mPosition == mConditionalExpressions.length()-1)
@@ -116,7 +116,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
                 {
                     throw badTokenError(currentChars,mPosition);
                 }
-                //mPosition++; // we stop all by going pos > lengt
+                //mPosition++; // we stop all by going pos > length
             }
 
 
@@ -153,7 +153,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
                 if (mPosition < mConditionalExpressions.length()) currentChars += mConditionalExpressions[mPosition];
                 //currentChars += mConditionalExpressions[mPosition];
             }
-            //we end the match 2 posibility 
+            //we end the match: 2 possibilities
             //we are at the end of the mConditionalExpressions and we need to ensure the match
             //we are not we can continu
             if (mPosition == mConditionalExpressions.length()-1)
@@ -189,7 +189,7 @@ std::shared_ptr<ParsingToken<ConditionalTokenTypes>> ConditionalLexer::getNextTo
                 if (!std::regex_match(currentChars,strRegex)){
                      throw badTokenError(currentChars,mPosition);
                 }
-                //mPosition++; // we stop all by going pos > lengt
+                //mPosition++; // we stop all by going pos > length
             }
 
             mPosition++; // go after the last " 
diff --git a/src/nodeTester/ConditionalParser.cpp b/src/nodeTester/ConditionalParser.cpp
index ba40c561375e0c09eb86009d447a782ab99d5d0b..5cf6f8617612b09c1a61e694a56dc6ed4d0f2b39 100644
--- a/src/nodeTester/ConditionalParser.cpp
+++ b/src/nodeTester/ConditionalParser.cpp
@@ -76,7 +76,7 @@ std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::Conditional
         return constructAstLambda();
     }
 
-   throw std::runtime_error("ConditionalParser unknow val type "+ token->rep().str() + "\n" + mLexer.rep());
+   throw std::runtime_error("ConditionalParser unknown val type "+ token->rep().str() + "\n" + mLexer.rep());
 
 }
 
@@ -169,7 +169,7 @@ std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::Conditional
 
         std::shared_ptr<AstNode<ConditionalTokenTypes>> right = constructAstExpr(prec);
 
-        //i'm not sur what append to newNode
+        //I'm not sure what happens to newNode
         //std::shared_ptr<AstNode<ConditionalTokenTypes>> newNode = std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{left,constructAstCmpr()});
         std::shared_ptr<AstNode<ConditionalTokenTypes>> newNode = std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{left,right});
         left = newNode;
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index 29a9ee6252a0c2baa6e07bc56e60650685db6bdd..6de0854e8cdc166a3f938a166348db481956e792 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -72,7 +72,7 @@ bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
         const DimSize_t roi = end - start + 1;
 
         AIDGE_ASSERT(start < nbDims && end < nbDims, "'start' and 'end' must be < {}", nbDims);
-        AIDGE_ASSERT(roi> 1, "Unvalid ROI for Shape");
+        AIDGE_ASSERT(roi> 1, "Invalid ROI for Shape");
 
         mOutputs[0]->resize({roi});
         return true;
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index e3ed13588d8c2b5ddde91d37fc926d675f0666a3..2191f14a150088dfa1d369d2ef31051e5ab16326 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -43,7 +43,7 @@ void Aidge::Split_OpImpl::forward() {
         {
             // Compute chunk position in input tensor
             DimSize_t idx = j * stride_post * dims[axis] + chunkIdxOnAxis;
-            // Copy chunk in ouput
+            // Copy chunk in output
             op.getOutput(i)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(idx),
                                             splits[i] * stride_post, offset);
             offset += splits[i] * stride_post;
@@ -124,7 +124,7 @@ bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
         // Fill Split attr if empty
         if(this->split().empty()) {
             // In case the input Split is not provided, divide the dimension of Axis into equal slices
-            AIDGE_ASSERT(dimToSplit > nbOutput, "Split_Op: Output number {} musn't be bigger than dimension {}.", nbOutput, dimToSplit);
+            AIDGE_ASSERT(dimToSplit > nbOutput, "Split_Op: Output number {} mustn't be bigger than dimension {}.", nbOutput, dimToSplit);
             DimSize_t baseSliceSize = dimToSplit / nbOutput;
 
             DimSize_t remainder = dimToSplit % nbOutput;
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 4c4de25282c487d023f9c184b015ac332e716b7b..50c8f561c1732d6f7f37ae5b8d6f03c4e135939c 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -37,7 +37,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
         const std::shared_ptr<GraphView>  metaOpGraph = metaOp -> getMicroGraph();
         const std::vector<std::pair<std::shared_ptr<Node>, IOIndex_t>> outputNodes = metaOpGraph -> getOrderedOutputs();
         if (outputNodes.size() != 1) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Bad MetaOperator argument for fuseBatchNorm recipie.");
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Bad MetaOperator argument for fuseBatchNorm recipe.");
         }
         convNode = outputNodes[0].first;
     }
@@ -99,7 +99,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
         Log::notice("Warning: variance < 1e-12 for all outputs! Is the network correctly trained?\n");
     }
 
-    // Add bias if it is non existant, as there will be a bias after the fuse
+    // Add bias if it is non-existent, as there will be a bias after the fuse
     if (!convOp->getInput(2)) {
         if (metaNode) {
             // Conv is inside a meta-operator, we add bias outside it
diff --git a/src/recipes/MatMulToFC.cpp b/src/recipes/MatMulToFC.cpp
index 9b5addd3bb971b3f61980a582d4cce6435c57219..8d902c680b8fa0d30a873b6f355734ce19d608f5 100644
--- a/src/recipes/MatMulToFC.cpp
+++ b/src/recipes/MatMulToFC.cpp
@@ -34,7 +34,7 @@ void Aidge::matMulToFC(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
 
 
     // Step 1 : Create FC
-    // Fetch the output dimension throught the bias size
+    // Fetch the output dimension through the bias size
     std::shared_ptr<Node> bias = nullptr;
     if (addNode) {
         if (addNode->getParent(0) == matmulNode) {
@@ -76,7 +76,7 @@ void Aidge::matMulToFC(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
     }
     AIDGE_ASSERT(weight != nullptr, "Could not deduce weight input for MatMul operator.");
 
-    // Instanciate FC
+    // Instantiate FC
     std::string fcName = matmulNode->name();
     if (addNode && !addNode->name().empty()) {
         fcName += "_" + addNode->name();
diff --git a/src/scheduler/MemoryManager.cpp b/src/scheduler/MemoryManager.cpp
index 6fe0d1f0745a464b8fd61bf634d7105b9d22faf8..ba805f919a607e0b2ae3272d173aa11360548fa7 100644
--- a/src/scheduler/MemoryManager.cpp
+++ b/src/scheduler/MemoryManager.cpp
@@ -898,7 +898,7 @@ Aidge::MemoryManager::getMaxHole(std::shared_ptr<MemorySpace> memSpace) const
                     std::make_pair((*itPlane).allocated, holeSize));
 
                 if (!newInsert) {
-                    // Another plane exists at the same time, one must substract
+                    // Another plane exists at the same time, one must subtract
                     // the size of this other plane from the hole size
                     (*it).second = std::max(0, static_cast<int>((*it).second)
                         - static_cast<int>((*itPlane).getContiguousSize())
diff --git a/src/scheduler/ParallelScheduler.cpp b/src/scheduler/ParallelScheduler.cpp
index 2b9a1f5b62741d5f08dfc3e5aa45b1102d54b850..2a44dd49f961bdcdf965a33d2ffe91f3ed8ae352 100644
--- a/src/scheduler/ParallelScheduler.cpp
+++ b/src/scheduler/ParallelScheduler.cpp
@@ -46,7 +46,7 @@ void Aidge::ParallelScheduler::forward(bool forwardDims, const std::vector<std::
 
     const auto namePtrTable = mGraphView->getRankedNodesName("{0} ({1}#{3})");
 
-    // Sort static scheduling, the order will be the prefered threads scheduling
+    // Sort static scheduling, the order will be the preferred threads scheduling
     // order for non critical nodes
     std::deque<StaticSchedulingElement*> staticSchedule(mStaticSchedule.at(mStaticScheduleStep).begin(), mStaticSchedule.at(mStaticScheduleStep).end());
     std::stable_sort(staticSchedule.begin(), staticSchedule.end(),
diff --git a/unit_tests/backend/Test_TensorImpl.cpp b/unit_tests/backend/Test_TensorImpl.cpp
index ceb6772d01d4ee84524896fead96abcb445f84ff..2f6ef519935295dce5edd0d486c9f5ba6e307331 100644
--- a/unit_tests/backend/Test_TensorImpl.cpp
+++ b/unit_tests/backend/Test_TensorImpl.cpp
@@ -34,7 +34,7 @@ TEST_CASE("[backend/cpu/data] Tensor", "[TensorImpl]") {
 }
 
 TEST_CASE("Tensor fill", "[TensorImpl][fill]") {
-  SECTION("Instantiate batches independantly") {
+  SECTION("Instantiate batches independently") {
     // initialization with 0s
     std::shared_ptr<Tensor> concatenatedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{});
     //concatenatedTensor->print();
diff --git a/unit_tests/graph/Test_Connector.cpp b/unit_tests/graph/Test_Connector.cpp
index 79acce9281039f9f3c67b7235d8999b6c7173685..3fe2318c81a95528e46900e82fa7e4b69f5e28f5 100644
--- a/unit_tests/graph/Test_Connector.cpp
+++ b/unit_tests/graph/Test_Connector.cpp
@@ -184,7 +184,7 @@ TEST_CASE("Connector Mini-graph", "[Connector]") {
     // g->save("TestGraph");
 }
 
-TEST_CASE("Structural descrition - Sequential", "[GraphView]") {
+TEST_CASE("Structural description - Sequential", "[GraphView]") {
     // SECTION("Empty Sequence") {
     //     std::shared_ptr<GraphView> g1 = Sequential(); // Not supported
     //     REQUIRE(g1->getNodes() == std::set<std::shared_ptr<Node>>());
@@ -256,7 +256,7 @@ TEST_CASE("Structural description - Parallel", "[GraphView]") {
     }
 }
 
-TEST_CASE("Strucutral Description - Complex Graph", "[GraphView]") {
+TEST_CASE("Structural Description - Complex Graph", "[GraphView]") {
     std::shared_ptr<Node> firstLayer = GenericOperator("first", 1, 0, 1);
     auto g = Sequential({firstLayer,
                     GenericOperator("l2", 1, 0, 1),
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index 2fa06cf23b3b681211208a3e5bbea9226f0930b8..a7d02cd2fc1f3782046f3e8a9e7d7ca00b2ec5a7 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -195,7 +195,7 @@ TEST_CASE("remove", "[GraphView][remove]") {
             // g2->save("./remove2");
 
             REQUIRE(nodePtrTo(g1->getNodes(), nodePtrToName) == nodePtrTo(g2->getNodes(), nodePtrToName));
-            // Order not garanteed, because when a node is removed, it can create new GraphView inputs/outputs
+            // Order not guaranteed, because when a node is removed, it can create new GraphView inputs/outputs
             // Their order thus depends on the deletion order!
             //REQUIRE(nodePtrTo(g1->getOrderedInputs(), nodePtrToName) == nodePtrTo(g2->getOrderedInputs(), nodePtrToName));
             //REQUIRE(nodePtrTo(g1->getOrderedOutputs(), nodePtrToName) == nodePtrTo(g2->getOrderedOutputs(), nodePtrToName));
@@ -248,7 +248,7 @@ TEST_CASE("[core/graph] GraphView(add)", "[GraphView][add]") {
 
     SECTION("Several Nodes") {
         std::shared_ptr<GraphView> g = std::make_shared<GraphView>("TestGraph");
-        // should automaticaly add parents for learnable parameters
+        // should automatically add parents for learnable parameters
         std::shared_ptr<Node> GOp1 = GenericOperator("Fictive", 0, 1, 1, "Gop1");
         std::shared_ptr<Node> GOp1parent = GenericOperator("Fictive", 0, 0, 1, "Gop1parent");
         GOp1parent->addChild(GOp1, 0, 0);
@@ -257,7 +257,7 @@ TEST_CASE("[core/graph] GraphView(add)", "[GraphView][add]") {
         REQUIRE(nodePtrTo(g->getOrderedInputs(), nodePtrToName) == std::vector<std::pair<std::string, IOIndex_t>>({}));
         REQUIRE(nodePtrTo(g->getOrderedOutputs(), nodePtrToName) == std::vector<std::pair<std::string, IOIndex_t>>({{"Gop1", 0}}));
 
-        // there should be no deplicates
+        // there should be no duplicates
         g->add(GOp1);
         REQUIRE(g->getNodes() == std::set<std::shared_ptr<Node>>({GOp1, GOp1parent}));
         REQUIRE(nodePtrTo(g->getOrderedInputs(), nodePtrToName) == std::vector<std::pair<std::string, IOIndex_t>>({}));
@@ -396,7 +396,7 @@ TEST_CASE("[core/graph] GraphView(save)") {
 }
 
 TEST_CASE("[core/graph] GraphView(resetConnections)") {
-    SECTION("disconnect data iput") {
+    SECTION("disconnect data input") {
         std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
         std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 2, 1, "c1");
         std::shared_ptr<Node> conv2 = GenericOperator("Conv", 1, 0, 1, "c2");
diff --git a/unit_tests/graphRegex/Test_FsmMatch.cpp b/unit_tests/graphRegex/Test_FsmMatch.cpp
index 008251feaac9d2dbe21aae3dfc7ebaa69e828ae7..6229ec62af96802ebdfe871e6058ab1791cf80fd 100644
--- a/unit_tests/graphRegex/Test_FsmMatch.cpp
+++ b/unit_tests/graphRegex/Test_FsmMatch.cpp
@@ -52,7 +52,7 @@ TEST_CASE("FsmMatch") {
     }
 
 
-    SECTION("2 branche graph"){
+    SECTION("2 branches graph"){
 
         std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
         std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 0, 1, "c");
diff --git a/unit_tests/graphRegex/Test_GraphLexer.cpp b/unit_tests/graphRegex/Test_GraphLexer.cpp
index 1b8cc8e018546ebfe3f84202d9404db27b17449b..615c052041e40c2941054b7ddcc19229c0994f0d 100644
--- a/unit_tests/graphRegex/Test_GraphLexer.cpp
+++ b/unit_tests/graphRegex/Test_GraphLexer.cpp
@@ -104,7 +104,7 @@ TEST_CASE("GraphRegex", "Lexer") {
 
 
             std::ostringstream errorMessage;
-            errorMessage << "\n we whant :"<< lexemToFind << "\n we get : "<< token->getLexeme() <<"\n"<< "on \n" << testString << " :\n "  ;
+            errorMessage << "\n we want :"<< lexemToFind << "\n we get : "<< token->getLexeme() <<"\n"<< "on \n" << testString << " :\n "  ;
 
             CAPTURE(errorMessage.str());
             REQUIRE(token->getLexeme() == lexemToFind);
diff --git a/unit_tests/graphRegex/Test_graphRegexAST.cpp b/unit_tests/graphRegex/Test_graphRegexAST.cpp
index 1cdb0bc1934983a26ab742bfe8879455077219cc..f9c7a7c5dc8ab103b3b566f2df77883b0b1966f1 100644
--- a/unit_tests/graphRegex/Test_graphRegexAST.cpp
+++ b/unit_tests/graphRegex/Test_graphRegexAST.cpp
@@ -55,7 +55,7 @@ TEST_CASE("GraphStrInterpreter") {
         for (const std::string& test : tests) {
             std::shared_ptr<GraphStrInterpreter>  strGenerator = std::make_shared<GraphStrInterpreter>(test);
             std::string astString = strGenerator->interpret();
-            //supress space in the test becase erase in the AST
+            //suppress space in the test because erase in the AST
             std::string testNoS = test;
             testNoS.erase(std::remove_if(testNoS.begin(), testNoS.end(), ::isspace), testNoS.end());
             //if the last char is ; (SEP) it will not in the AST and it's not a bug erase it
diff --git a/unit_tests/nodeTester/Test_ConditionalLexer.cpp b/unit_tests/nodeTester/Test_ConditionalLexer.cpp
index a937c27227dde4fa03ed7733df9e9552c3c1ac7b..d79824e2e53d0c9621fdc6847ebca00faba03af4 100644
--- a/unit_tests/nodeTester/Test_ConditionalLexer.cpp
+++ b/unit_tests/nodeTester/Test_ConditionalLexer.cpp
@@ -130,7 +130,7 @@ TEST_CASE("nodeTester", "Lexer") {
 
 
             std::ostringstream errorMessage;
-            errorMessage << "\n we whant :"<< lexemToFind << "\n we get : "<< token->getLexeme() <<"\n"<< "on \n" << testString << " :\n "  ;
+            errorMessage << "\n we want :"<< lexemToFind << "\n we get : "<< token->getLexeme() <<"\n"<< "on \n" << testString << " :\n "  ;
 
             CAPTURE(errorMessage.str());
             REQUIRE(token->getLexeme() == lexemToFind);
diff --git a/unit_tests/operator/Test_GenericOperator.cpp b/unit_tests/operator/Test_GenericOperator.cpp
index 41bad69749fd82f892c6faa625739d0493396c73..c82c55f165278c985dabd771cd6481a4839ada2c 100644
--- a/unit_tests/operator/Test_GenericOperator.cpp
+++ b/unit_tests/operator/Test_GenericOperator.cpp
@@ -82,7 +82,7 @@ TEST_CASE("[core/operator] GenericOp(type check)", "[Operator]") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         Testop.addAttr<long>("LongAttr", 3);
 
-        // This line should raise a failled assert
+        // This line should raise a failed assert
         REQUIRE_THROWS(Testop.getAttr<int>("LongAttribute"));
     }
 }
diff --git a/unit_tests/operator/Test_Squeeze_Op.cpp b/unit_tests/operator/Test_Squeeze_Op.cpp
index 471a1dcd1e45384b2c65da75ddee9d3ec039dc34..660e970dd65bb7c4b9a52b0ccb62350ca355d243 100644
--- a/unit_tests/operator/Test_Squeeze_Op.cpp
+++ b/unit_tests/operator/Test_Squeeze_Op.cpp
@@ -156,7 +156,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
     }
     SECTION("axes is given via tensor") {
       SECTION("tensor is empty") {
-        // arguments here should be overriden by axes_T values
+        // arguments here should be overridden by axes_T values
         std::shared_ptr<Node> myUnsqueeze =
             Squeeze(std::vector<std::int8_t>({0, 4}));
         auto op = std::static_pointer_cast<OperatorTensor>(
@@ -177,7 +177,7 @@ TEST_CASE("[core/operator] Squeeze(forwardDims)", "[Squeeze][forwardDims]") {
         CHECK(op->getOutput(0)->dims() == std::vector<DimSize_t>({3, 4, 5}));
       }
       SECTION("tensor not empty") {
-        // arguments here should be overriden by axes_T values
+        // arguments here should be overridden by axes_T values
         std::shared_ptr<Node> myUnsqueeze =
             Squeeze(std::vector<std::int8_t>({3, 1}));
         auto op = std::static_pointer_cast<OperatorTensor>(
diff --git a/unit_tests/operator/Test_Unsqueeze_Op.cpp b/unit_tests/operator/Test_Unsqueeze_Op.cpp
index 79f5b89b1c08f409b214a9439431c2d2a51ddbd2..a436ab5a54e2f66fe87bfc28157d691cf6548dd4 100644
--- a/unit_tests/operator/Test_Unsqueeze_Op.cpp
+++ b/unit_tests/operator/Test_Unsqueeze_Op.cpp
@@ -177,7 +177,7 @@ TEST_CASE("[core/operator] Unsqueeze(forwardDims)",
       }
     }
     SECTION("axes is given via tensor") {
-        // arguments here should be overriden by axes_T values
+        // arguments here should be overridden by axes_T values
         std::shared_ptr<Node> myUnsqueeze =
             Unsqueeze(std::vector<std::int8_t>({0, 4}));
         auto op = std::static_pointer_cast<OperatorTensor>(
diff --git a/unit_tests/recipes/Test_ExplicitTranspose.cpp b/unit_tests/recipes/Test_ExplicitTranspose.cpp
index 0c0a46710d69606508a22e7b01dac708db9b8f34..bb89ba7952347a779e6979e7cf3c4f1bd68abf9b 100644
--- a/unit_tests/recipes/Test_ExplicitTranspose.cpp
+++ b/unit_tests/recipes/Test_ExplicitTranspose.cpp
@@ -41,11 +41,11 @@ TEST_CASE("[ExplicitTranspose] conv") {
     g1->forwardDims();
     explicitTranspose(g1);
 
-    // Check that Tranpose were inserted
+    // Check that Transpose were inserted
     g1->save("explicitTranspose_after");
     REQUIRE(g1->getNodes().size() == 12);
 
-    // Check that Tranpose are removed
+    // Check that Transpose are removed
     conv2->getOperator()->setDataFormat(DataFormat::NCHW);
     explicitTranspose(g1);
 
diff --git a/unit_tests/recipes/Test_removeConstantOfShape.cpp b/unit_tests/recipes/Test_removeConstantOfShape.cpp
index 247149a0fdb1087f14ac17d125659d677ccfb506..b912efc640fc901f694afeda256be91d51010419 100644
--- a/unit_tests/recipes/Test_removeConstantOfShape.cpp
+++ b/unit_tests/recipes/Test_removeConstantOfShape.cpp
@@ -32,8 +32,8 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/recipies] removeConstantOfShape",
-          "[ConstantOfShape][removeConstantOfShape][recipies]") {
+TEST_CASE("[cpu/recipes] removeConstantOfShape",
+          "[ConstantOfShape][removeConstantOfShape][recipes]") {
   auto input_T = std::make_shared<Tensor>(Array1D<int64_t, 4>({1, 1, 3, 3}));
 
   auto model = std::make_shared<GraphView>();
diff --git a/unit_tests/recipes/Test_removeFlatten.cpp b/unit_tests/recipes/Test_removeFlatten.cpp
index c3b4c08d98115c9f081bbbf8cb677114b66c545a..1b5e2783813da890b1e79744582f54bb5c932772 100644
--- a/unit_tests/recipes/Test_removeFlatten.cpp
+++ b/unit_tests/recipes/Test_removeFlatten.cpp
@@ -24,7 +24,7 @@
 
 namespace Aidge {
 
-TEST_CASE("[cpu/recipies] RemoveFlatten", "[RemoveFlatten][recipies]") {
+TEST_CASE("[cpu/recipes] RemoveFlatten", "[RemoveFlatten][recipes]") {
   std::shared_ptr<Node> flatten =
       GenericOperator("Flatten", 1, 0, 1, "myFlatten");
   std::shared_ptr<Node> fc0 = FC(10, 10, false, "FC_1");
diff --git a/unit_tests/utils/Test_StaticAttributes.cpp b/unit_tests/utils/Test_StaticAttributes.cpp
index 36c2e0454b415e1cb25cc3581016530a372b9e65..b23f8683e0ae3a2de805770556fefdd66722460d 100644
--- a/unit_tests/utils/Test_StaticAttributes.cpp
+++ b/unit_tests/utils/Test_StaticAttributes.cpp
@@ -43,6 +43,6 @@ TEST_CASE("[core/attributes] StaticAttribute") {
             attr<TestAttr::d>({true, false, true}));
 
         REQUIRE(attrs.getAttr<int>("a") == 42);
-        REQUIRE_THROWS(attrs.getAttr<int>("inexistant"));
+        REQUIRE_THROWS(attrs.getAttr<int>("inexistent"));
     }
 }