diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 7f32d695a41d954e9f31c6682e3cc6fc0226aed9..ff6601c487ea97294019a12ba899d251b08077e7 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_IMPORTS_H__
-#define __AIDGE_IMPORTS_H__
+#ifndef AIDGE_IMPORTS_H_
+#define AIDGE_IMPORTS_H_
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/backend/TensorImpl.hpp"
@@ -48,4 +48,4 @@
 //#include "aidge/utilsParsing/AstNode.hpp"
 //#include "aidge/utilsParsing/ParsingToken.hpp"
 
-#endif /* __AIDGE_IMPORTS_H__ */
+#endif /* AIDGE_IMPORTS_H_ */
diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 7e022145d1eeaa8a2bd79afe69ca06ca57a62651..5aa2829e16f612b0867ab69feccb829ba2095e1b 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_OPERATORIMPL_H__
-#define __AIDGE_OPERATORIMPL_H__
+#ifndef AIDGE_OPERATORIMPL_H_
+#define AIDGE_OPERATORIMPL_H_
 
 #include <cstddef>
 #include <vector>
@@ -57,4 +57,4 @@ public:
 };
 } // namespace Aidge
 
-#endif /* __AIDGE_OPERATORIMPL_H__ */
+#endif /* AIDGE_OPERATORIMPL_H_ */
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 58f2d547e513d540a491155045c463f9a7199578..c56f66fc0b827ccccd9749b9880507dbf48c8179 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_TENSORIMPL_H__
-#define __AIDGE_TENSORIMPL_H__
+#ifndef AIDGE_TENSORIMPL_H_
+#define AIDGE_TENSORIMPL_H_
 
 #include <cstddef>
 #include <cstdio>
@@ -26,7 +26,7 @@ public:
     virtual void setRawPtr(void* /*ptr*/)
     {
         printf("Cannot set raw pointer for backend %s\n", mBackend);
-    };  
+    };
     virtual std::size_t scalarSize() const = 0; // Size of one scalar (in bytes)
     constexpr const char *backend() const { return mBackend; }
     virtual ~TensorImpl() = default;
@@ -38,4 +38,4 @@ private:
 
 } // namespace Aidge
 
-#endif /* __AIDGE_TENSORIMPL_H__ */
+#endif /* AIDGE_TENSORIMPL_H_ */
diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 4edc4b9a5a9fd877cf9a3e84c7f644be2a11534a..81b7810a8a548df7e5a2829b1a31cbe337491382 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_DATA_H__
-#define __AIDGE_DATA_H__
+#ifndef AIDGE_DATA_H_
+#define AIDGE_DATA_H_
 
 #include "aidge/utils/Parameter.hpp"
 
@@ -66,10 +66,10 @@ template <> const Aidge::DataType NativeType<int>::type = Aidge::DataType::Int32
 
 template <>
 const char* const EnumStrings<Aidge::DataType>::data[]
-    = {"Float64", "Float32", "Float16", "BFloat16", "Binary", "Ternary", 
-       "Int2", "Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16", 
-       "Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6", 
+    = {"Float64", "Float32", "Float16", "BFloat16", "Binary", "Ternary",
+       "Int2", "Int3", "Int4", "Int5", "Int6", "Int7", "Int8", "Int16",
+       "Int32", "Int64", "UInt2", "UInt3", "UInt4", "UInt5", "UInt6",
        "UInt7", "UInt8", "UInt16", "UInt32", "UInt64"};
 }
 
-#endif /* __AIDGE_DATA_H__ */
\ No newline at end of file
+#endif /* AIDGE_DATA_H_ */
\ No newline at end of file
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 01e2a5a51d86c28d3a89bd9085c60bfad297623f..c3a6e478f8943253a9f9b3565db2d4452a9ca133 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_DATA_TENSOR_H__
-#define __AIDGE_CORE_DATA_TENSOR_H__
+#ifndef AIDGE_CORE_DATA_TENSOR_H_
+#define AIDGE_CORE_DATA_TENSOR_H_
 
 #include <cstring>
 #include <set>
@@ -156,10 +156,10 @@ class Tensor : public Data,
      * @param dataType Sets the type of inserted data.
      */
     Tensor(DataType dataType = DataType::Float32)
-        : Data(Type), 
-          mDataType(dataType), 
-          mDims({}), 
-          mSize(0), 
+        : Data(Type),
+          mDataType(dataType),
+          mDims({}),
+          mSize(0),
           mSizeM1(0)
     {
         // ctor
@@ -167,14 +167,14 @@ class Tensor : public Data,
 
     /**
      * @brief Construct a new Tensor object copied from another one.
-     * @param otherTensor 
+     * @param otherTensor
      */
     Tensor(const Tensor& otherTensor)
-        : Data(Type), 
-          mDataType(otherTensor.mDataType), 
-          mDims(otherTensor.mDims), 
-          mSize(otherTensor.mSize), 
-          mSizeM1(otherTensor.mSizeM1) 
+        : Data(Type),
+          mDataType(otherTensor.mDataType),
+          mDims(otherTensor.mDims),
+          mSize(otherTensor.mSize),
+          mSizeM1(otherTensor.mSizeM1)
     {
         if (otherTensor.hasImpl()) {
             mImpl = Registrar<Tensor>::create({otherTensor.mImpl->backend(), dataType()})(*this);
@@ -312,7 +312,7 @@ class Tensor : public Data,
 
     /**
      * @brief Assess data type, dimensions, backend and data are the same.
-     * @param otherTensor 
+     * @param otherTensor
      */
     bool operator==(const Tensor &otherTensor) const {
         if ((!mImpl && !otherTensor.mImpl) || (dataType() != otherTensor.dataType()) ||
@@ -325,7 +325,7 @@ class Tensor : public Data,
     /**
      * @brief Set the backend of the Tensor associated implementation
      * @details Create and initialized an implementation if non was associated.
-     * @param name 
+     * @param name
      */
     inline void setBackend(const std::string &name) {
         if (mImpl) {
@@ -342,7 +342,7 @@ class Tensor : public Data,
 
     /**
      * @brief Get a list of available backends.
-     * @return std::set<std::string> 
+     * @return std::set<std::string>
      */
     static std::set<std::string> getAvailableBackends(){
         std::set<std::string> backendsList;
@@ -353,7 +353,7 @@ class Tensor : public Data,
 
     /**
      * @brief Get the data type enum.
-     * @return constexpr DataType 
+     * @return constexpr DataType
      */
     constexpr DataType dataType() const { return mDataType; }
 
@@ -376,27 +376,27 @@ class Tensor : public Data,
 
     /**
      * @brief Get the Impl object
-     * @return constexpr const std::unique_ptr<TensorImpl>& 
+     * @return constexpr const std::unique_ptr<TensorImpl>&
      */
     constexpr const std::unique_ptr<TensorImpl> &getImpl() { return mImpl; }
 
     /**
      * @brief Return if an implementaiton has been associated.
-     * @return true 
-     * @return false 
+     * @return true
+     * @return false
      */
     bool hasImpl() const { return (mImpl) ? true : false; }
 
     /**
      * @brief Get number of dimensions of the Tensor.
-     * @return std::size_t 
+     * @return std::size_t
      */
     inline std::size_t nbDims() const { return mDims.size(); }
 
     /**
      * @brief Get dimensions of the Tensor object.
      * @tparam DIM number of dimensions.
-     * @return constexpr std::array<DimSize_t, DIM> 
+     * @return constexpr std::array<DimSize_t, DIM>
      */
     template <DimIdx_t DIM>
     constexpr std::array<DimSize_t, DIM> dims() const {
@@ -406,26 +406,26 @@ class Tensor : public Data,
 
     /**
      * @brief Get dimensions of the Tensor object.
-     * @return constexpr const std::vector<DimSize_t>& 
+     * @return constexpr const std::vector<DimSize_t>&
      */
     constexpr const std::vector<DimSize_t> &dims() const { return mDims; }
 
     /**
      * @brief Get the number of elements in the Tensor object.
-     * @return constexpr std::size_t 
+     * @return constexpr std::size_t
      */
     constexpr std::size_t size() const { return mSize; }
 
     /**
      * @brief Get the number of elements in the N-1 dimensions of the Tensor object.
-     * @return constexpr std::size_t 
+     * @return constexpr std::size_t
      */
     constexpr std::size_t sizeM1() const { return mSizeM1; }
 
     /**
      * @brief Change the shape of the Tensor object according to the given argument.
      * @tparam DIM new dimensions.
-     * @param dims 
+     * @param dims
      */
     template <std::array<DimSize_t, 1>::size_type DIM> // deducing std::array size_type and declaring DIM accordingly
     void resize(const std::array<DimSize_t, DIM> &dims) {
@@ -441,8 +441,8 @@ class Tensor : public Data,
 
     /**
      * @brief Return if the Tensor object has at leastone element.
-     * @return true 
-     * @return false 
+     * @return true
+     * @return false
      */
     bool empty() const { return mDims.empty(); }
 
@@ -540,8 +540,8 @@ class Tensor : public Data,
                 }
             }
         }
-        
-        
+
+
         res += "}";
         return res;
     }
@@ -575,10 +575,10 @@ private:
             mSizeM1 = std::accumulate(++mDims.begin(),mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
             mSize = static_cast<std::size_t>(mSizeM1 * mDims[0]);
         }
-        
+
         return mSize;
     }
 };
 }  // namespace Aidge
 
-#endif /* __AIDGE_CORE_DATA_TENSOR_H__ */
+#endif /* AIDGE_CORE_DATA_TENSOR_H_ */
diff --git a/include/aidge/graph/Connector.hpp b/include/aidge/graph/Connector.hpp
index c5dde5c97c61d3661c1ee9cebe7cc17080950eb9..599ca7d6defd729b6e6536dcc95f326d345701d9 100644
--- a/include/aidge/graph/Connector.hpp
+++ b/include/aidge/graph/Connector.hpp
@@ -8,8 +8,8 @@
  * SPDX-License-Identifier: EPL-2.0
  *
  ********************************************************************************/
-#ifndef __AIDGE_CORE_GRAPH_CONNECTOR_H__
-#define __AIDGE_CORE_GRAPH_CONNECTOR_H__
+#ifndef AIDGE_CORE_GRAPH_CONNECTOR_H_
+#define AIDGE_CORE_GRAPH_CONNECTOR_H_
 
 #include <cassert>
 #include <memory>
@@ -18,7 +18,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-    
+
 class Node;
 class GraphView;
 /**
@@ -83,4 +83,4 @@ class Connector {
 std::shared_ptr<GraphView> generateGraph(std::vector<Connector> ctors);
 }  // namespace Aidge
 
-#endif /* __AIDGE_CORE_GRAPH_CONNECTOR_H__ */
\ No newline at end of file
+#endif /* AIDGE_CORE_GRAPH_CONNECTOR_H_ */
\ No newline at end of file
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index e5fa35354968963859d0b4cbbc01139cbc309250..718eddeaf6a5d08c9dab4898f5a57c0192dcb80b 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -10,8 +10,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_GRAPH_GRAPHVIEW_H__
-#define __AIDGE_CORE_GRAPH_GRAPHVIEW_H__
+#ifndef AIDGE_CORE_GRAPH_GRAPHVIEW_H_
+#define AIDGE_CORE_GRAPH_GRAPHVIEW_H_
 
 #include <map>
 #include <memory>
@@ -33,14 +33,14 @@ enum class DataType;
 class GraphView : public std::enable_shared_from_this<GraphView> {
 private:
     /// @brief Name of the graphview
-    std::string mName; 
+    std::string mName;
 
     /// @brief Set of nodes included in the GraphView
-    std::set<NodePtr> mNodes; 
+    std::set<NodePtr> mNodes;
 
     /// @brief Set of nodes included in the graphview with names
     std::map<std::string, NodePtr> mNodeRegistry;
-    
+
     /// @brief Nodes without input link
     std::set<NodePtr> mInputNodes;
 
@@ -49,23 +49,23 @@ private:
 
 public:
     GraphView(std::string name="")
-        : mName(name) 
+        : mName(name)
     {
         // ctor
     }
 
     // GraphView(std::set<NodePtr> nodes, std::string name="")
-    //     : mName(name) 
+    //     : mName(name)
     // {
     //     add(nodes);
     // }
 
-    bool operator==(const GraphView &gv) const 
+    bool operator==(const GraphView &gv) const
     {
         return mNodes == gv.mNodes;
     }
 
-    NodePtr operator[](std::string name) 
+    NodePtr operator[](std::string name)
     {
         assert(mNodeRegistry.find(name) != mNodeRegistry.end() && "Could not find Node in the GraphView.");
         return mNodeRegistry.at(name);
@@ -185,7 +185,7 @@ public:
     /**
      * @brief Get parents Nodes of the specified Node.
      * @param nodeName Name of the Node.
-     * @return std::vector<NodePtr> 
+     * @return std::vector<NodePtr>
      */
     std::vector<NodePtr> getParents(const std::string nodeName) const;
     std::vector<std::vector<NodePtr>> getOrderedParents() const;
@@ -206,7 +206,7 @@ public:
 
     /**
      * @brief Get the Nodes pointed to by the GraphView object.
-     * @return std::set<NodePtr> 
+     * @return std::set<NodePtr>
      */
     inline std::set<NodePtr> getNodes() const { return mNodes; }
 
@@ -233,14 +233,14 @@ public:
     /**
      * @brief Include a Node to the current GraphView object.
      * @param other_Nde Node to add.
-     * @param includeLearnableParam Include non-data inputs, like weights and biases 
+     * @param includeLearnableParam Include non-data inputs, like weights and biases
      * in the GraphView automatically. Default: true.
      */
     void add(NodePtr otherNode, bool includeLearnableParam = true);
     /**
      * @brief Include a set of Nodes to the current GraphView object.
-     * @param otherNodes 
-     * @param includeLearnableParam 
+     * @param otherNodes
+     * @param includeLearnableParam
      */
     void add(std::set<NodePtr> otherNodes,
              bool includeLearnableParam = true);
@@ -326,8 +326,8 @@ public:
     /**
      * @brief Replace the current GraphView with the set of given Nodes if possible
      * @param newNodes Set of Nodes.
-     * @return true 
-     * @return false 
+     * @return true
+     * @return false
      */
     bool replaceWith(std::set<NodePtr> newNodes);
     void updateInputNodes();
@@ -343,13 +343,13 @@ private:
 
     /**
      * @brief Get the sum of the number of dataInput Nodes for all inputNodes of the GraphView object.
-     * @return IOIndex_t 
+     * @return IOIndex_t
      */
     IOIndex_t getNbDataInputs() const;
 
     /**
      * @brief Get the sum of the number of free dataInput connection for all inputNodes of the GraphView object.
-     * @return IOIndex_t 
+     * @return IOIndex_t
      */
     IOIndex_t getNbFreeDataInputs() const;
 
@@ -378,4 +378,4 @@ private:
 };
 }  // namespace Aidge
 
-#endif /* __AIDGE_CORE_GRAPH_GRAPHVIEW_H__ */
\ No newline at end of file
+#endif /* AIDGE_CORE_GRAPH_GRAPHVIEW_H_ */
\ No newline at end of file
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 0780ce9a24da0ceb0c42b32944021f5df2fa9726..f056505e6e7839266213ac355cc0e1b93ab98f0d 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_GRAPH_NODE_H__
-#define __AIDGE_CORE_GRAPH_NODE_H__
+#ifndef AIDGE_CORE_GRAPH_NODE_H_
+#define AIDGE_CORE_GRAPH_NODE_H_
 
 #include <cassert>
 #include <memory>
@@ -39,7 +39,7 @@ private:
           // Compare the content of the weak_ptrs
           auto sharedA = a.lock();
           auto sharedB = b.lock();
-          if (!sharedB) return false; // nothing after expired pointer 
+          if (!sharedB) return false; // nothing after expired pointer
           if (!sharedA) return true;
           return sharedA < sharedB; // shared_ptr has a valid comparison operator
       }
@@ -78,7 +78,7 @@ public:
   /**
    * @brief Functional operator for user-friendly connection interface using an ordered set of Connectors.
    * @param ctors Ordered Connectors linking their associated Node to the input of the current Node with the same index.
-   * @return Connector 
+   * @return Connector
    */
   Connector operator()(const std::vector<Connector> &ctors);
 
@@ -165,7 +165,7 @@ public:
 
   /**
    * @brief Set fix value for the specified input by creating a Producer wrapping the given Tensor.
-   * 
+   *
    * @param idx Input index.
    * @param tensor Constant Tensor to add as parent for specified index.
    */
@@ -301,7 +301,7 @@ public:
   /**
    * @brief Get the pointer to parent of the specified input index. This pointer is nullptr if no parent is linked.
    * @param inId Input index.
-   * @return std::shared_ptr<Node>& 
+   * @return std::shared_ptr<Node>&
    */
   inline NodePtr &getParents(const IOIndex_t inId) {
     assert(inId != gk_IODefaultIndex);
@@ -312,7 +312,7 @@ public:
    * @brief Unlink the parent Node at the specified input index and return its pointer.
    * Return a nullptr is no parent was linked.
    * @param inId Input index.
-   * @return std::shared_ptr<Node> 
+   * @return std::shared_ptr<Node>
    */
   NodePtr popParent(const IOIndex_t inId);
 
@@ -331,7 +331,7 @@ public:
   /**
    * @brief Get the list of children Nodes linked to the output at specified index.
    * @param outId Output index.
-   * @return std::vector<std::shared_ptr<Node>> 
+   * @return std::vector<std::shared_ptr<Node>>
    */
   std::vector<NodePtr> getChildren(const IOIndex_t outId) const;
 
@@ -364,8 +364,8 @@ private:
 
   /**
    * @brief Set the idInChildren parameter.
-   * @param inID 
-   * @param newNodeOutID 
+   * @param inID
+   * @param newNodeOutID
    */
   void setInputId(const IOIndex_t inID, const IOIndex_t newNodeOutID);
 
@@ -375,17 +375,17 @@ private:
 
   /**
    * @brief Add the given Node as a child for the current Node.
-   * @param otherNode 
-   * @param outId 
-   * @param otherInId 
+   * @param otherNode
+   * @param outId
+   * @param otherInId
    */
   void addChildOp(NodePtr otherNode, const IOIndex_t outId,
                   const IOIndex_t otherInId);
 
   /**
    * @brief Add the given GraphView's input Node as a child for the current Node
-   * @param otherGraph 
-   * @param outId 
+   * @param otherGraph
+   * @param outId
    * @param otherInId pointer the GraphView's input Node and its input index. Defaults to the
    * only input Node if the GraphView has got one.
    */
@@ -402,4 +402,4 @@ private:
 };
 } // namespace Aidge
 
-#endif /* __AIDGE_CORE_GRAPH_NODE_H__ */
+#endif /* AIDGE_CORE_GRAPH_NODE_H_ */
diff --git a/include/aidge/graph/OpArgs.hpp b/include/aidge/graph/OpArgs.hpp
index dd0cfe1cca8a3f487c18875cff3f90cc56291107..560c3a02c641c29526752dbf352905d0ded32a7e 100644
--- a/include/aidge/graph/OpArgs.hpp
+++ b/include/aidge/graph/OpArgs.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_GRAPH_OPARGS_H__
-#define __AIDGE_CORE_GRAPH_OPARGS_H__
+#ifndef AIDGE_CORE_GRAPH_OPARGS_H_
+#define AIDGE_CORE_GRAPH_OPARGS_H_
 
 #include <memory>
 #include <cassert>
@@ -30,7 +30,7 @@ private:
 public:
     OpArgs(const std::shared_ptr<GraphView>& view_)
      : mView(view_) {assert(mView && "The GraphView provided should not be a nullptr.");}
-    
+
     OpArgs(const std::shared_ptr<Node>& node_)
      : mNode(node_) {assert(mNode && "The Node provided should not be a nullptr.");}
 
@@ -83,4 +83,4 @@ std::shared_ptr<GraphView> Residual(std::initializer_list<OpArgs> inputs);
 
 }
 
-#endif /* __AIDGE_CORE_GRAPH_OPARGS_H__ */
\ No newline at end of file
+#endif /* AIDGE_CORE_GRAPH_OPARGS_H_ */
\ No newline at end of file
diff --git a/include/aidge/graphmatching/GRegex.hpp b/include/aidge/graphmatching/GRegex.hpp
index 1292b607cee35f50dc0acc5f5113946be103065e..fd2d0c52ab47e0f03b3307bdbcfcb5a7b81d78d9 100644
--- a/include/aidge/graphmatching/GRegex.hpp
+++ b/include/aidge/graphmatching/GRegex.hpp
@@ -10,8 +10,8 @@
  ********************************************************************************/
 
 
-#ifndef __AIDGE_GREGEX_H__
-#define __AIDGE_GREGEX_H__
+#ifndef AIDGE_GREGEX_H_
+#define AIDGE_GREGEX_H_
 
 #include <stdexcept>    // for exception, runtime_error, out_of_range
 #include <regex>
@@ -43,7 +43,7 @@ public:
     bool walk_validation_all_node_read_validate_by_one_stm(const std::vector<std::vector<SeqStm*>> all_stm);
 
     bool walk_validation_common_nodes_same_tag_for_all_stm(const std::vector<std::vector<SeqStm*>> all_stm);
-    
+
     std::set<NodeTmp> get_all_validate_nodes(const std::vector<std::vector<SeqStm*>> all_stm);
 
     std::vector<SeqStm*> getStmInit() const {
@@ -53,11 +53,11 @@ public:
     StmFactory getStmFab() const {
         return mStmFab;
     }
-    
+
     //std::set<std::pair<std::vector<NodeTmp>,std::set<NodeTmp>>> match(const std::shared_ptr<GraphView> graphToMatch);
     Match match(const std::shared_ptr<GraphView> graphToMatch);
 
 };
 
 }
-#endif //__AIDGE_GREGEX_H__
\ No newline at end of file
+#endif //AIDGE_GREGEX_H_
\ No newline at end of file
diff --git a/include/aidge/graphmatching/Match.hpp b/include/aidge/graphmatching/Match.hpp
index 27acc2e8a0880f8c62d0ba995fcde5479bdcb501..fc617a22869fde6531fba67c8641581572cbffc4 100644
--- a/include/aidge/graphmatching/Match.hpp
+++ b/include/aidge/graphmatching/Match.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_MATCH_H__
-#define __AIDGE_MATCH_H__
+#ifndef AIDGE_MATCH_H_
+#define AIDGE_MATCH_H_
 
 #include <vector>
 #include <set>
@@ -41,4 +41,4 @@ protected:
 };
 
 }
-#endif //__AIDGE_MATCH_H__
\ No newline at end of file
+#endif //AIDGE_MATCH_H_
\ No newline at end of file
diff --git a/include/aidge/graphmatching/NodeRegex.hpp b/include/aidge/graphmatching/NodeRegex.hpp
index 387bfea46f0147613a116beac1f9c6102ed661e5..10ba7225834e4abfb7f0f5cd45ffa91b22f2f87d 100644
--- a/include/aidge/graphmatching/NodeRegex.hpp
+++ b/include/aidge/graphmatching/NodeRegex.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_NODEREGEX_H__
-#define __AIDGE_NODEREGEX_H__
+#ifndef AIDGE_NODEREGEX_H_
+#define AIDGE_NODEREGEX_H_
 #include <cstdlib>
 #include <iostream>
 #include <cstring>
@@ -27,7 +27,7 @@ class NodeRegex
     NodeRegex(const std::string c){
         mCondition = c;
     };
-    
+
     // Version 1 - Only test the type of the node (no need for a lexer)
     // Input : Node_op
     // Output : bool
@@ -38,4 +38,4 @@ class NodeRegex
 
 }
 
-#endif /* ___AIDGE_NODEREGEX_H___ */
\ No newline at end of file
+#endif /* AIDGE_NODEREGEX_H_ */
\ No newline at end of file
diff --git a/include/aidge/graphmatching/SeqStm.hpp b/include/aidge/graphmatching/SeqStm.hpp
index 6ccd6cfcd322c4d38af2ad04cd2b3a96d839e6cd..0823b5fc0f292d8cf28f7ead53d01bd8dd8adbfe 100755
--- a/include/aidge/graphmatching/SeqStm.hpp
+++ b/include/aidge/graphmatching/SeqStm.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_SEQSTM_H__
-#define __AIDGE_SEQSTM_H__
+#ifndef AIDGE_SEQSTM_H_
+#define AIDGE_SEQSTM_H_
 
 #include <iostream>
 #include <map>
@@ -124,4 +124,4 @@ public:
 };
 } // namespace Aidge
 
-#endif /* __AIDGE_SEQSTM_H__ */
\ No newline at end of file
+#endif /* AIDGE_SEQSTM_H_ */
\ No newline at end of file
diff --git a/include/aidge/graphmatching/StmFactory.hpp b/include/aidge/graphmatching/StmFactory.hpp
index 929fdaf3595038f21367768254040c45b291641b..b5850e4a00691ef6c808554a86a6ceec8c38ad19 100644
--- a/include/aidge/graphmatching/StmFactory.hpp
+++ b/include/aidge/graphmatching/StmFactory.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_STMFACTORY_H__
-#define __AIDGE_STMFACTORY_H__
+#ifndef AIDGE_STMFACTORY_H_
+#define AIDGE_STMFACTORY_H_
 
 #include <map>
 #include <utility>
@@ -52,4 +52,4 @@ private:
 };
 }
 
-#endif //__AIDGE_STMFACTORY_H__
\ No newline at end of file
+#endif //AIDGE_STMFACTORY_H_
\ No newline at end of file
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 36e592682e61fbc178ed4623f88e9fa5f446f25d..de9afab203e78f83b1edfa294af1f9b3e90f209a 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_ADD_H__
-#define __AIDGE_CORE_OPERATOR_ADD_H__
+#ifndef AIDGE_CORE_OPERATOR_ADD_H_
+#define AIDGE_CORE_OPERATOR_ADD_H_
 
 #include <numeric>
 #include <vector>
@@ -94,7 +94,7 @@ public:
         return *(mInputs[inputIdx].get());
     }
     inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
-    
+
     inline std::shared_ptr<Tensor> getInput(const IOIndex_t inputIdx) const override final {
         assert(static_cast<std::size_t>(inputIdx) < NUM && "wrong inputIdx for Add operator.");
         return mInputs[inputIdx];
@@ -144,4 +144,4 @@ inline std::shared_ptr<Node> Add(const char* name = nullptr) {
 }
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_ADD_H__ */
+#endif /* AIDGE_CORE_OPERATOR_ADD_H_ */
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index a86942d14e531e5974c8924d8dafb8a4d0bebf85..0dbd91e059656369cdf12a4dfab2a981806011d1 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_AVGPOOLING_H__
-#define __AIDGE_CORE_OPERATOR_AVGPOOLING_H__
+#ifndef AIDGE_CORE_OPERATOR_AVGPOOLING_H_
+#define AIDGE_CORE_OPERATOR_AVGPOOLING_H_
 
 #include <array>
 #include <numeric>
@@ -46,7 +46,7 @@ public:
     AvgPooling_Op() = delete;
 
     using Parameterizable_ = Parameterizable<AvgPoolingParam,
-                                             std::array<DimSize_t, DIM>, 
+                                             std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, (DIM<<1)> >;
     template <AvgPoolingParam e>
@@ -76,7 +76,7 @@ public:
 
             for (std::size_t dim = 0; dim < this->template get<AvgPoolingParam::KernelDims>().size() ; ++dim) {
                 outputDims[dim+2] = 1 + static_cast<DimSize_t>(
-                                            std::floor(static_cast<float>(mInput->dims()[dim+2] - 
+                                            std::floor(static_cast<float>(mInput->dims()[dim+2] -
                                                                     this->template get<AvgPoolingParam::KernelDims>()[dim] +
                                                                     this->template get<AvgPoolingParam::PaddingDims>()[dim] +
                                                                     this->template get<AvgPoolingParam::PaddingDims>()[dim+DIM]) /
@@ -166,4 +166,4 @@ const char *const EnumStrings<Aidge::AvgPoolingParam>::data[] = {"StrideDims",
                                                           "KernelDims", "PaddingDims"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_AVGPOOLING_H__ */
+#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 6c64ae44c04f9a8f37d0dde14b251da94ce72a3f..01a6ccf4f7cf30843cac3dc819a686d3ad11530d 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_BATCHNORM_H__
-#define __AIDGE_CORE_OPERATOR_BATCHNORM_H__
+#ifndef AIDGE_CORE_OPERATOR_BATCHNORM_H_
+#define AIDGE_CORE_OPERATOR_BATCHNORM_H_
 
 #include <array>
 #include <memory>
@@ -53,7 +53,7 @@ public:
           Parameterizable_(param<BatchNormParam::Epsilon>(epsilon),
                            param<BatchNormParam::Momentum>(momentum)),
           mOutput(std::make_shared<Tensor>()) {
-        setDatatype(DataType::Float32);        
+        setDatatype(DataType::Float32);
     }
 
     // Data operator[](const char* inputName) override final {
@@ -158,4 +158,4 @@ template <>
 const char *const EnumStrings<Aidge::BatchNormParam>::data[] = { "Epsilon", "Momentum" };
 }
 
-#endif // __AIDGE_CORE_OPERATOR_BATCHNORM_H__
\ No newline at end of file
+#endif // AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index babeac443dd8d51a8b9d3de5a2e96b8745636060..1fcea7a4b5d654d3ec1a8661c03912b6074918c3 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_CONV_H__
-#define __AIDGE_CORE_OPERATOR_CONV_H__
+#ifndef AIDGE_CORE_OPERATOR_CONV_H_
+#define AIDGE_CORE_OPERATOR_CONV_H_
 
 #include <array>
 #include <cmath>
@@ -63,7 +63,7 @@ public:
                            param<ConvParam::KernelDims>(kernel_dims),
                            param<ConvParam::PaddingDims>(padding_dims)),
           mOutput(std::make_shared<Tensor>()) {
-        setDatatype(DataType::Float32);        
+        setDatatype(DataType::Float32);
     }
 
     // Data operator[](const char* inputName) override final {
@@ -161,7 +161,7 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Conv(DimSize_t in_channels, 
+inline std::shared_ptr<Node> Conv(DimSize_t in_channels,
                                   DimSize_t out_channels,
                                   const std::array<DimSize_t, DIM> &kernel_dims,
                                   const char *name = nullptr,
@@ -197,4 +197,4 @@ const char *const EnumStrings<Aidge::ConvParam>::data[] = {"StrideDims", "Dilati
                                                           "KernelDims", "PaddingDims"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_CONV_H__ */
+#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 7cbc609798064e993c7744fdf08865d897518a89..4a4af179b454f231ae2ce8dbafeb319795a76861 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H__
-#define __AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H__
+#ifndef AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_
+#define AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_
 
 #include <array>
 #include <cmath>
@@ -49,9 +49,9 @@ class ConvDepthWise_Op : public Operator,
     ConvDepthWise_Op() = delete;
 
     using Parameterizable_ = Parameterizable<ConvDepthWiseParam,
-                                             std::array<DimSize_t, DIM>, 
                                              std::array<DimSize_t, DIM>,
-                                             DimSize_t, 
+                                             std::array<DimSize_t, DIM>,
+                                             DimSize_t,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, (DIM<<1) >>;
     template <ConvDepthWiseParam e>
@@ -62,7 +62,7 @@ class ConvDepthWise_Op : public Operator,
                                const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
         : Operator(Type),
-          Parameterizable_(param<ConvDepthWiseParam::StrideDims>(stride_dims), 
+          Parameterizable_(param<ConvDepthWiseParam::StrideDims>(stride_dims),
                            param<ConvDepthWiseParam::DilationDims>(dilation_dims),
                            param<ConvDepthWiseParam::Channels>(0),
                            param<ConvDepthWiseParam::KernelDims>(kernel_dims),
@@ -130,7 +130,7 @@ class ConvDepthWise_Op : public Operator,
     std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < 3 && "operators supports only 3 inputs");
         return std::static_pointer_cast<Data>(mInputs[inputIdx]);
-    }    
+    }
     std::shared_ptr<Data> getRawOutput(__attribute__((unused)) const IOIndex_t outputIdx) const override final {
         assert(outputIdx == 0 && "operator supports only 1 output");
         return std::static_pointer_cast<Data>(mOutput);
@@ -193,4 +193,4 @@ const char *const EnumStrings<Aidge::ConvDepthWiseParam>::data[] = {"StrideDims"
                                                           "KernelDims", "PaddingDims"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H__ */
+#endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index ebd3a8826dbca292b57f4d3cae749f4e1d7968c8..74362044349a087896cbc231f86818c89187c568 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_FC_H__
-#define __AIDGE_CORE_OPERATOR_FC_H__
+#ifndef AIDGE_CORE_OPERATOR_FC_H_
+#define AIDGE_CORE_OPERATOR_FC_H_
 
 #include <array>
 #include <cmath>
@@ -75,7 +75,7 @@ public:
             std::array<DimSize_t, 2> weightDims = {this->template get<FCParam::OutChannels>(), static_cast<DimSize_t>(mInputs[0]->sizeM1())};
             // <out_channels, batch>
             std::array<DimSize_t, 2> outputDims = {mInputs[0]->dims()[0], this->template get<FCParam::OutChannels>()};
-            
+
             mInputs[1]->resize(weightDims);
             mOutput->resize(outputDims);
         }
@@ -152,4 +152,4 @@ const char *const EnumStrings<Aidge::FCParam>::data[] = {"OutChannels",
                                                         "NoBias"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_FC_H__ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 86b96bfaa8bf0eb5ab52fa542f169708ff8d09ca..7efbe62a410ed21704d2a34d46a295202c43c31e 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_GENERICOPERATOR_H__
-#define __AIDGE_CORE_OPERATOR_GENERICOPERATOR_H__
+#ifndef AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_
+#define AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_
 
 #include <memory>
 #include <vector>
@@ -163,4 +163,4 @@ inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbDataI
 }
 }  // namespace Aidge
 
-#endif /* __AIDGE_CORE_OPERATOR_GENERICOPERATOR_H__ */
+#endif /* AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index ed967001a23a6b9dd4cfe5db09ec4f1edd60e5ea..058f41fd85516db97a66de448d0f4b1b61254021 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_LEAKYRELU_H__
-#define __AIDGE_CORE_OPERATOR_LEAKYRELU_H__
+#ifndef AIDGE_CORE_OPERATOR_LEAKYRELU_H_
+#define AIDGE_CORE_OPERATOR_LEAKYRELU_H_
 
 #include <vector>
 #include <memory>
@@ -73,7 +73,7 @@ public:
     inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
 
 
-    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { 
+    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
         assert((inputIdx == 0) && "LeakyReLU Operator has only 1 input");
         return mInput;
     }
@@ -124,4 +124,4 @@ const char* const EnumStrings<Aidge::LeakyReLUParam>::data[]
     = {"NegativeSlope"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
+#endif /* AIDGE_CORE_OPERATOR_LEAKYRELU_H_ */
diff --git a/include/aidge/operator/Matmul.hpp b/include/aidge/operator/Matmul.hpp
index a871fe516c95802fdb67e81ca3f58fb3be4dce25..b6181989cf8fa583bf87ac45b2af8d0491e387be 100644
--- a/include/aidge/operator/Matmul.hpp
+++ b/include/aidge/operator/Matmul.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_MATMUL_H__
-#define __AIDGE_CORE_OPERATOR_MATMUL_H__
+#ifndef AIDGE_CORE_OPERATOR_MATMUL_H_
+#define AIDGE_CORE_OPERATOR_MATMUL_H_
 
 #include <array>
 #include <cmath>
@@ -67,7 +67,7 @@ public:
             std::array<DimSize_t, 2> weightDims = {static_cast<DimSize_t>(mInputs[0]->size()), this->template get<MatmulParam::OutChannels>()};
             // <out_channels, batch>
             std::array<DimSize_t, 1> outputDims = {this->template get<MatmulParam::OutChannels>()};
-            
+
             mInputs[1]->resize(weightDims);
             mOutput->resize(outputDims);
         }
@@ -140,4 +140,4 @@ template <>
 const char *const EnumStrings<Aidge::MatmulParam>::data[] = {"OutChannels"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR__MATMUL_H__ */
+#endif /* AIDGE_CORE_OPERATOR_MATMUL_H_ */
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 7fa1a20449d055da9cd25e6dc4f987757aca3f4a..35a59b56cbf5c10a78116f81de96a8baddc03ff0 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_METAOPERATOR_H__
-#define __AIDGE_CORE_OPERATOR_METAOPERATOR_H__
+#ifndef AIDGE_CORE_OPERATOR_METAOPERATOR_H_
+#define AIDGE_CORE_OPERATOR_METAOPERATOR_H_
 
 #include "aidge/operator/Operator.hpp"
 
@@ -25,4 +25,4 @@ public:
 };
 }
 
-#endif /* MetaOperator_H__ */
+#endif /* AIDGE_CORE_OPERATOR_METAOPERATOR_H_ */
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index 9f24ce884863776f6856ee03fb4feb089e6323e2..30e1ce2a7f664485077282405ec60ddf49513cb5 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_OPERATOR_H__
-#define __AIDGE_CORE_OPERATOR_OPERATOR_H__
+#ifndef AIDGE_CORE_OPERATOR_OPERATOR_H_
+#define AIDGE_CORE_OPERATOR_OPERATOR_H_
 
 #include <memory>
 #include <string>
@@ -58,23 +58,23 @@ public:
     /**
      * @brief Minimum amount of data from a specific input for one computation pass.
      * @param inputIdx Index of the input analysed.
-     * @return NbElts_t 
+     * @return NbElts_t
      */
     NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const;
 
     /**
      * @brief Amount of data from a specific input actually used in one computation pass.
-     * 
+     *
      * @param inputIdx Index of the input analysed.
-     * @return NbElts_t 
+     * @return NbElts_t
      */
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const;
 
     /**
      * @brief Amount of data ready to be used on a specific output.
-     * 
+     *
      * @param outputIdx Index of the output analysed.
-     * @return NbElts_t 
+     * @return NbElts_t
      */
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const;
 
@@ -96,4 +96,4 @@ public:
 };
 } // namespace Aidge
 
-#endif /* __AIDGE_CORE_OPERATOR_OPERATOR_H__ */
+#endif /* AIDGE_CORE_OPERATOR_OPERATOR_H_ */
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 4d5461957826e9ebea4a39bb9a7618604e80797a..4268b46b2f0facdaf0d167f16ae01ba8cba2c01d 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_PRODUCER_H__
-#define __AIDGE_CORE_OPERATOR_PRODUCER_H__
+#ifndef AIDGE_CORE_OPERATOR_PRODUCER_H_
+#define AIDGE_CORE_OPERATOR_PRODUCER_H_
 
 #include <array>
 #include <vector>
@@ -141,4 +141,4 @@ void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, Dim
 }
 } // namespace Aidge
 
-#endif /* __AIDGE_CORE_OPERATOR_PRODUCER_H__ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 93bc9a74091c2893dc7b1f7fcc34c72828f34f27..b969a76bc24a6fa41288b009934a3c060289b597 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_RELU_H__
-#define __AIDGE_CORE_OPERATOR_RELU_H__
+#ifndef AIDGE_CORE_OPERATOR_RELU_H_
+#define AIDGE_CORE_OPERATOR_RELU_H_
 
 #include <cassert>
 #include <memory>
@@ -62,7 +62,7 @@ public:
     inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
 
 
-    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { 
+    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
         assert((inputIdx == 0) && "ReLU Operator has only 1 input");
         return mInput;
     }
@@ -107,4 +107,4 @@ inline std::shared_ptr<Node> ReLU(const char* name = nullptr) {
 }
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
+#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 9be2acde8570bdc250054e9bed7a1b0d5c3e52ff..3d3cb8204e4b133b944521cf84117b5d16ac83df 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_OPERATOR_SOFTMAX_H__
-#define __AIDGE_CORE_OPERATOR_SOFTMAX_H__
+#ifndef AIDGE_CORE_OPERATOR_SOFTMAX_H_
+#define AIDGE_CORE_OPERATOR_SOFTMAX_H_
 
 #include <cassert>
 #include <memory>
@@ -62,7 +62,7 @@ public:
     inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
 
 
-    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final { 
+    inline std::shared_ptr<Tensor> getInput(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
         assert((inputIdx == 0) && "Softmax Operator has only 1 input");
         return mInput;
     }
@@ -107,4 +107,4 @@ inline std::shared_ptr<Node> Softmax(const char* name = nullptr) {
 }
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_SOFTMAX_H__ */
+#endif /* AIDGE_CORE_OPERATOR_SOFTMAX_H_ */
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 2abe90e111c0997928d270b149a6ab4a460eb3aa..81b3f31662933fe4f59a17cdb0ee42441fb791bc 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_SCHEDULER_H__
-#define __AIDGE_SCHEDULER_H__
+#ifndef AIDGE_SCHEDULER_H_
+#define AIDGE_SCHEDULER_H_
 
 #include <chrono>
 #include <memory>
@@ -57,9 +57,9 @@ public:
 private:
     /**
      * @brief Set of layers receiving an input from currently processing layers
-     * 
+     *
      * @param producers Set of layers ready to run.
-     * @return std::set<std::shared_ptr<Node>> 
+     * @return std::set<std::shared_ptr<Node>>
      */
     std::set<std::shared_ptr<Node>> getConsumers(const std::set<std::shared_ptr<Node>>& producers) const;
 
@@ -68,4 +68,4 @@ private:
 };
 } // namespace Aidge
 
-#endif /* __AIDGE_SCHEDULER_H__ */
\ No newline at end of file
+#endif /* AIDGE_SCHEDULER_H_ */
\ No newline at end of file
diff --git a/include/aidge/utils/CParameter.hpp b/include/aidge/utils/CParameter.hpp
index 64943ff58eae9a06fe50afb1b81deea1b66e90ea..0f4c74ab8bccb7bc134e035a5f12d31d51663e5d 100644
--- a/include/aidge/utils/CParameter.hpp
+++ b/include/aidge/utils/CParameter.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CPARAMETER_H__
-#define __AIDGE_CPARAMETER_H__
+#ifndef AIDGE_CPARAMETER_H_
+#define AIDGE_CPARAMETER_H_
 
 #include <assert.h>
 #include <map>
@@ -112,4 +112,4 @@ private:
 
 }
 
-#endif /* __AIDGE_CPARAMETER_H__ */
+#endif /* AIDGE_CPARAMETER_H_ */
diff --git a/include/aidge/utils/Parameter.hpp b/include/aidge/utils/Parameter.hpp
index 6a8fcca41ff03951eeac80493cd9f86a2ea3586b..b0c6e35950187f17d991cfe5b2c9bd2b09f1e70f 100644
--- a/include/aidge/utils/Parameter.hpp
+++ b/include/aidge/utils/Parameter.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_UTILS_PARAMETER_H__
-#define __AIDGE_CORE_UTILS_PARAMETER_H__
+#ifndef AIDGE_CORE_UTILS_PARAMETER_H_
+#define AIDGE_CORE_UTILS_PARAMETER_H_
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
@@ -40,23 +40,23 @@ constexpr std::size_t size(T (&)[N]) { return N; }
 #ifdef PYBIND
 /* This abstract class allows to avoid binding Parametrizable.
 *  Otherwise we would need to bind every template possible of Parametrizable.
-*  Every operators can access the methods of this class by inheriting from 
+*  Every operators can access the methods of this class by inheriting from
 *  PyAbstractParametrizable in the binding code.
 */
-class PyAbstractParametrizable{ 
+class PyAbstractParametrizable{
     public:
         /* Bindable get function, does not recquire any templating.
         *  This is thanks to py::object which allow the function to
         *  be agnostic from its return type.
         */
         virtual py::object getPy(const char* /*name*/) = 0;
-}; 
+};
 #endif
 
 template <class PARAM_ENUM, class ...T>
 class Parameterizable
 #ifdef PYBIND
-    : public PyAbstractParametrizable 
+    : public PyAbstractParametrizable
 #endif
     {
 public:
@@ -99,7 +99,7 @@ public:
     constexpr typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() {
         return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
     }
-    
+
     template <PARAM_ENUM paramEnum>
     constexpr const typename std::tuple_element<static_cast<std::size_t>(paramEnum),std::tuple<T...>>::type& get() const {
         return std::get<static_cast<std::size_t>(paramEnum)>(mParams);
@@ -194,4 +194,4 @@ private:
 };
 }
 
-#endif /* AIDGE_CORE_UTILS_PARAMETER_H__ */
+#endif /* AIDGE_CORE_UTILS_PARAMETER_H_ */
diff --git a/include/aidge/utils/Recipies.hpp b/include/aidge/utils/Recipies.hpp
index d6104c56ce288d260ac78c5eb9d1e83d75ca34c8..4cbf8fd284bef314dbe28b19ebdae05172467bad 100644
--- a/include/aidge/utils/Recipies.hpp
+++ b/include/aidge/utils/Recipies.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_UTILS_RECIPIES_H__
-#define __AIDGE_CORE_UTILS_RECIPIES_H__
+#ifndef AIDGE_CORE_UTILS_RECIPIES_H_
+#define AIDGE_CORE_UTILS_RECIPIES_H_
 
 #include "aidge/graph/Node.hpp"
 #include "aidge/graph/GraphView.hpp"
@@ -24,4 +24,4 @@ void removeFlatten(std::set<std::shared_ptr<Node>> nodes);
 }
 
 
-#endif /* __AIDGE_CORE_UTILS_RECIPIES_H__ */
\ No newline at end of file
+#endif /* AIDGE_CORE_UTILS_RECIPIES_H_ */
\ No newline at end of file
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 8348eb98d3f3ab4da0873c8b3f4a476a9f8e1afc..98749c1349bad644dee2c1a8549559939791f71c 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef __AIDGE_CORE_UTILS_REGISTRAR_H__
-#define __AIDGE_CORE_UTILS_REGISTRAR_H__
+#ifndef AIDGE_CORE_UTILS_REGISTRAR_H_
+#define AIDGE_CORE_UTILS_REGISTRAR_H_
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
@@ -68,8 +68,8 @@ struct Registrar {
         for(auto keyValue : C::registry())
             keys.push_back(keyValue.first);
         return keys;
-    }    
+    }
 };
 }
 
-#endif // __AIDGE_CORE_UTILS_REGISTRAR_H__
\ No newline at end of file
+#endif // AIDGE_CORE_UTILS_REGISTRAR_H_
\ No newline at end of file
diff --git a/include/aidge/utils/Types.h b/include/aidge/utils/Types.h
index d05c64ead0e147a8d66c7f40dbd978283401683a..d65279f1f4d36498ea7653428332690fc99a5def 100644
--- a/include/aidge/utils/Types.h
+++ b/include/aidge/utils/Types.h
@@ -10,8 +10,8 @@
  ********************************************************************************/
 
 
-#ifndef __AIDGE_TYPES_H__
-#define __AIDGE_TYPES_H__
+#ifndef AIDGE_TYPES_H_
+#define AIDGE_TYPES_H_
 
 #include <limits>
 #include <type_traits>
@@ -59,4 +59,4 @@ constexpr IOIndex_t gk_IOMaxIndex = std::numeric_limits<IOIndex_t>::max() - 1;
 
 } // namespace Aidge
 
-#endif // __AIDGE_TYPES_H__
\ No newline at end of file
+#endif // AIDGE_TYPES_H_
\ No newline at end of file