diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3e490964dfa0da35743a9704fe4dc1f5aeeebfec..d9892b6bebe2f6e20ef5575f6e621780d461ef85 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -67,14 +67,14 @@ target_compile_features(${module_name} PRIVATE cxx_std_14)
 if(WERROR)
     target_compile_options(${module_name} PRIVATE
     $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-    -Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Werror>)
+    -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Werror>)
     target_compile_options(${module_name} PRIVATE
     $<$<CXX_COMPILER_ID:MSVC>:
     /W4>)
 else()
     target_compile_options(${module_name} PRIVATE
         $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:
-        -Wall -Wextra -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Wpedantic>)
+        -Wall -Wextra -fPIC -Wold-style-cast -Winline -pedantic -Werror=narrowing -Wshadow -Wpedantic>)
         target_compile_options(${module_name} PRIVATE
         $<$<CXX_COMPILER_ID:MSVC>:
         /W4>)
diff --git a/Makefile b/Makefile
deleted file mode 100644
index d10afab29d3e867632f284ee993929ddaad210ac..0000000000000000000000000000000000000000
--- a/Makefile
+++ /dev/null
@@ -1,36 +0,0 @@
-# This makefile does nothing but delegating the actual building to cmake
-BUILDDIR := build
-MAKEFLAGS := --no-print-directory
-
-all: core_with_pybind
-
-core_only:
-	mkdir -p ${BUILDDIR}; \
-	cd ${BUILDDIR}; \
-	cmake -DBUILD_CORE_ALONE=ON -DCMAKE_BUILD_TYPE=Release -DPYBIND=OFF -DTESTS=OFF ..; \
-	${MAKE} ${MAKEFLAGS};
-
-core_tests:
-	mkdir -p ${BUILDDIR}; \
-	cd ${BUILDDIR}; \
-	cmake -DBUILD_CORE_ALONE=ON -DCMAKE_BUILD_TYPE=Debug -DPYBIND=OFF -DTESTS=ON ..; \
-	${MAKE} ${MAKEFLAGS}; \
-	cd tests; \
-	ctest --output-on-failure || true;
-
-core_with_pybind:
-	mkdir -p ${BUILDDIR}; \
-	cd ${BUILDDIR}; \
-	cmake -DBUILD_CORE_ALONE=ON -DCMAKE_BUILD_TYPE=Release -DPYBIND=ON -DTESTS=OFF ..; \
-	${MAKE} ${MAKEFLAGS};
-
-core_with_pybind_tests:
-	mkdir -p ${BUILDDIR}; \
-	cd ${BUILDDIR}; \
-	cmake -DBUILD_CORE_ALONE=ON -DCMAKE_BUILD_TYPE=Debug -DPYBIND=ON -DTESTS=ON ..; \
-	${MAKE} ${MAKEFLAGS}; \
-	cd tests; \
-	ctest --output-on-failure || true;
-
-clean:
-	if [ -d "${BUILDDIR}" ]; then rm -rf ${BUILDDIR}; fi
\ No newline at end of file
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index bd2c7317fd0d8cca7f330cee013ff6b8de7b3e85..01e2a5a51d86c28d3a89bd9085c60bfad297623f 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -92,7 +92,7 @@ constexpr std::array<T, N + 1> append(T t, std::array<T, N> a, std::index_sequen
 /**
  * @brief Create a new array concatenating the initial one with the value to
  * add.
- * @details append([1,2,7], 3) -> [1,2,7,3]
+ * @details append({1,2,7}, 3) -> {1,2,7,3}
  *
  * @tparam T Data type.
  * @tparam N Number of elements in the initilial array.
@@ -131,25 +131,44 @@ struct Array4D {
     T data[SIZE_0][SIZE_1][SIZE_2][SIZE_3];
 };
 
+/**
+ * @brief Description for the tensor data structure.
+ * @details Sets the properties of the tensor without actually containing any data.
+ * Contains a pointer to an actual contiguous implementation of data.
+ */
 class Tensor : public Data,
                public Registrable<Tensor, std::tuple<std::string, DataType>, std::unique_ptr<TensorImpl>(const Tensor &)> {
    private:
-    DataType mDataType;
-    std::vector<DimSize_t> mDims;
-    std::unique_ptr<TensorImpl> mImpl;
-    std::shared_ptr<Tensor> mGrad;
+    DataType mDataType; /** enum to specify data type. */
+    std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */
+    std::unique_ptr<TensorImpl> mImpl; /** Pointer to the actual data implementation. */
+    std::shared_ptr<Tensor> mGrad; /** Pointer to the associated gradient Tensor instance. */
 
     // Cached data
-    std::size_t mSize;    // number of elements in the tensor
-    std::size_t mSizeM1;  // for a tensor of N dimensions, number of elements in the N-1
-                     // first dimensions
+    std::size_t mSize;    /** Number of elements in the Tensor. */
+    std::size_t mSizeM1;  /** Number of elements in the N-1 first dimensions */
 
    public:
     static constexpr const char *Type = "Tensor";
 
-    Tensor(DataType dataType = DataType::Float32) : Data(Type), mDataType(dataType), mDims({}), mSize(0), mSizeM1(0) {
+    /**
+     * @brief Construct a new empty Tensor object.
+     * @param dataType Sets the type of inserted data.
+     */
+    Tensor(DataType dataType = DataType::Float32)
+        : Data(Type), 
+          mDataType(dataType), 
+          mDims({}), 
+          mSize(0), 
+          mSizeM1(0)
+    {
         // ctor
     }
+
+    /**
+     * @brief Construct a new Tensor object copied from another one.
+     * @param otherTensor 
+     */
     Tensor(const Tensor& otherTensor)
         : Data(Type), 
           mDataType(otherTensor.mDataType), 
@@ -163,6 +182,11 @@ class Tensor : public Data,
         }
     }
 
+    /**
+     * @brief Construct a new Tensor object from the 1-dimension Array helper.
+     * @tparam T datatype
+     * @tparam SIZE_0 first array dimension.
+     */
     template <typename T, std::size_t SIZE_0>
     constexpr Tensor(Array1D<T, SIZE_0> &&arr)
         : Data(Type),
@@ -183,6 +207,13 @@ class Tensor : public Data,
         mImpl->copy(&arr.data[0], SIZE_0);
         return *this;
     }
+
+    /**
+     * @brief Construct a new Tensor object from the 2-dimensions Array helper.
+     * @tparam T datatype
+     * @tparam SIZE_0 first array dimension.
+     * @tparam SIZE_1 second array dimension.
+     */
     template <typename T, std::size_t SIZE_0, std::size_t SIZE_1>
     constexpr Tensor(Array2D<T, SIZE_0, SIZE_1> &&arr)
         : Data(Type),
@@ -203,6 +234,14 @@ class Tensor : public Data,
         mImpl->copy(&arr.data[0][0], SIZE_0 * SIZE_1);
         return *this;
     }
+
+    /**
+     * @brief Construct a new Tensor object from the 3-dimensions Array helper.
+     * @tparam T datatype
+     * @tparam SIZE_0 first array dimension.
+     * @tparam SIZE_1 second array dimension.
+     * @tparam SIZE_2 third array dimension.
+     */
     template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2>
     constexpr Tensor(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr)
         : Data(Type),
@@ -223,6 +262,15 @@ class Tensor : public Data,
         mImpl->copy(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
         return *this;
     }
+
+    /**
+     * @brief Construct a new Tensor object from the 4-dimensions Array helper.
+     * @tparam T datatype
+     * @tparam SIZE_0 first array dimension.
+     * @tparam SIZE_1 second array dimension.
+     * @tparam SIZE_2 third array dimension.
+     * @tparam SIZE_3 fourth array dimension.
+     */
     template <typename T, std::size_t SIZE_0, std::size_t SIZE_1, std::size_t SIZE_2, std::size_t SIZE_3>
     constexpr Tensor(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr)
         : Data(Type),
@@ -244,6 +292,11 @@ class Tensor : public Data,
         return *this;
     }
 
+    /**
+     * @brief Copy dimensions, datatype and data of another Tensor.
+     * @param t other Tensor object.
+     * @return Tensor&
+     */
     Tensor &operator=(const Tensor &t) {
         resize(t.dims());
         setDatatype(t.dataType());
@@ -257,6 +310,10 @@ class Tensor : public Data,
         return *this;
     }
 
+    /**
+     * @brief Assess whether the data type, dimensions, backend and data are the same.
+     * @param otherTensor 
+     */
     bool operator==(const Tensor &otherTensor) const {
         if ((!mImpl && !otherTensor.mImpl) || (dataType() != otherTensor.dataType()) ||
             (dims() != otherTensor.dims()) || (mImpl->backend() != otherTensor.mImpl->backend())) {
@@ -265,6 +322,11 @@ class Tensor : public Data,
         return *mImpl == *(otherTensor.mImpl);
     }
 
+    /**
+     * @brief Set the backend of the Tensor associated implementation
+     * @details Creates and initializes an implementation if none was associated.
+     * @param name 
+     */
     inline void setBackend(const std::string &name) {
         if (mImpl) {
             if (strcmp(mImpl->backend(), name.c_str()) != 0) {
@@ -277,6 +339,11 @@ class Tensor : public Data,
         } else
             mImpl = Registrar<Tensor>::create({name, mDataType})(*this);
     }
+
+    /**
+     * @brief Get a list of available backends.
+     * @return std::set<std::string> 
+     */
     static std::set<std::string> getAvailableBackends(){
         std::set<std::string> backendsList;
         for(std::tuple<std::string, DataType> tupleKey : Registrar<Tensor>::getKeys())
@@ -284,6 +351,10 @@ class Tensor : public Data,
         return backendsList;
     }
 
+    /**
+     * @brief Get the data type enum.
+     * @return constexpr DataType 
+     */
     constexpr DataType dataType() const { return mDataType; }
 
     /**
@@ -303,39 +374,78 @@ class Tensor : public Data,
         mDataType = dt;
     }
 
+    /**
+     * @brief Get the Impl object
+     * @return constexpr const std::unique_ptr<TensorImpl>& 
+     */
     constexpr const std::unique_ptr<TensorImpl> &getImpl() { return mImpl; }
 
-    bool hasImpl() const
-    {
-        return (mImpl) ? true : false;
-    }
+    /**
+     * @brief Return if an implementation has been associated.
+     * @return true 
+     * @return false 
+     */
+    bool hasImpl() const { return (mImpl) ? true : false; }
 
+    /**
+     * @brief Get number of dimensions of the Tensor.
+     * @return std::size_t 
+     */
     inline std::size_t nbDims() const { return mDims.size(); }
 
+    /**
+     * @brief Get dimensions of the Tensor object.
+     * @tparam DIM number of dimensions.
+     * @return constexpr std::array<DimSize_t, DIM> 
+     */
     template <DimIdx_t DIM>
     constexpr std::array<DimSize_t, DIM> dims() const {
         assert(DIM == mDims.size() && "wrong number of dimensions");
         return to_array<DIM>(mDims.cbegin());
     }
 
+    /**
+     * @brief Get dimensions of the Tensor object.
+     * @return constexpr const std::vector<DimSize_t>& 
+     */
     constexpr const std::vector<DimSize_t> &dims() const { return mDims; }
 
+    /**
+     * @brief Get the number of elements in the Tensor object.
+     * @return constexpr std::size_t 
+     */
     constexpr std::size_t size() const { return mSize; }
 
+    /**
+     * @brief Get the number of elements in the N-1 dimensions of the Tensor object.
+     * @return constexpr std::size_t 
+     */
     constexpr std::size_t sizeM1() const { return mSizeM1; }
 
-// deducing std::array size_type and declaring DIM accordingly
-    template <std::array<DimSize_t, 1>::size_type DIM>
+    /**
+     * @brief Change the shape of the Tensor object according to the given argument.
+     * @tparam DIM number of dimensions.
+     * @param dims new dimensions of the Tensor.
+     */
+    template <std::array<DimSize_t, 1>::size_type DIM> // deducing std::array size_type and declaring DIM accordingly
     void resize(const std::array<DimSize_t, DIM> &dims) {
         static_assert(DIM<=MaxDim,"Too many tensor dimensions required by resize, not supported");
         mDims.assign(dims.begin(), dims.end());
         computeSize();
     }
+
     void resize(const std::vector<DimSize_t> &dims) {
         mDims = dims;
         computeSize();
     }
+
+    /**
+     * @brief Return whether the Tensor object is empty (i.e. has no dimension).
+     * @return true 
+     * @return false 
+     */
     bool empty() const { return mDims.empty(); }
+
     template <typename expectedType, std::array<std::size_t, 1>::size_type DIM>
     constexpr expectedType &get(std::array<std::size_t, DIM> idx) {
         assert(DIM == mDims.size());
@@ -349,6 +459,7 @@ class Tensor : public Data,
     }
 
     std::string toString() {
+        if (dims().empty()) { return "{}"; }
         std::string res;
         std::size_t dim = 0;
         std::size_t *dimVals = new std::size_t[nbDims()];
@@ -449,7 +560,7 @@ class Tensor : public Data,
     }
 
 private:
-    ///\bug not protected against overflow, see ThaliaCommonPack for a solution
+    ///\bug not protected against overflow
     std::size_t computeSize() {
         if (mDims.empty()) {
             mSizeM1 = DimSize_t(0);
diff --git a/include/aidge/graph/Connector.hpp b/include/aidge/graph/Connector.hpp
index 796602a3018841261aa91ff9d1da231b6d95ccff..c5dde5c97c61d3661c1ee9cebe7cc17080950eb9 100644
--- a/include/aidge/graph/Connector.hpp
+++ b/include/aidge/graph/Connector.hpp
@@ -60,7 +60,7 @@ class Connector {
     }
 
    public:
-    IONb_t size() const;
+    IOIndex_t size() const;
 
     inline std::shared_ptr<Node> node() const { return mNode; }
 
@@ -68,7 +68,7 @@ class Connector {
 
    private:
     Connector(std::shared_ptr<Node> node, IOIndex_t index) : mNode(node) {
-        assert((index >= 0) && (static_cast<IONb_t>(index) < size()) &&
+        assert((index != gk_IODefaultIndex) && (index < size()) &&
                "Non-valid output index.\n");
         mOutputId = index;
     }
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 254a1dd92bdf12b8aa0762297578a55048b056fc..e5fa35354968963859d0b4cbbc01139cbc309250 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -25,6 +25,11 @@
 
 namespace Aidge {
 enum class DataType;
+
+/**
+ * @brief Group of Nodes forming a computational graph on which properties and functions
+ * can easily and safely be applied or run.
+ */
 class GraphView : public std::enable_shared_from_this<GraphView> {
 private:
     /// @brief Name of the graphview
@@ -49,11 +54,11 @@ public:
         // ctor
     }
 
-    GraphView(std::set<NodePtr> nodes, std::string name="")
-        : mName(name) 
-    {
-        add(nodes);
-    }
+    // GraphView(std::set<NodePtr> nodes, std::string name="")
+    //     : mName(name) 
+    // {
+    //     add(nodes);
+    // }
 
     bool operator==(const GraphView &gv) const 
     {
@@ -104,54 +109,68 @@ public:
 //        TENSOR MANAGEMENT
 ///////////////////////////////////////////////////////
 public:
-    inline std::set<NodePtr> inputNodes() const noexcept { return mInputNodes; }
-    inline std::set<NodePtr> outputNodes() const noexcept { return mOutputNodes; }
+    /** @brief Get reference to the set of input Nodes. */
+    inline const std::set<NodePtr>& inputNodes() const noexcept { return mInputNodes; }
+    /** @brief Get reference to the set of output Nodes. */
+    inline const std::set<NodePtr>& outputNodes() const noexcept { return mOutputNodes; }
 
+    /** @brief Assess if the given Node is an input Node of the GraphView object. */
     inline bool isInputNode(NodePtr nodePtr) const {
         return (mInputNodes.find(nodePtr) != mInputNodes.end()) ? true : false;
     }
+    /** @brief Assess if the given Node is an output Node of the GraphView object. */
     inline bool isOutputNode(NodePtr nodePtr) const {
         return (mOutputNodes.find(nodePtr) != mOutputNodes.end()) ? true : false;
     }
 
     /**
-     * @brief List data input Tensors of the graph input nodes.
+     * @brief List dataInput connections of the GraphView object's inputNodes.
      * @return std::vector<std::pair<NodePtr, IOIndex_t>>
      */
     std::vector<std::pair<NodePtr, IOIndex_t>> dataInputs() const;
 
     /**
-     * @brief List data input Tensors of the graph input nodes.
+     * @brief List dataInput connections of the GraphView object's inputNodes.
      * @param name Name of the Node.
      * @return std::vector<std::pair<NodePtr, IOIndex_t>>
      */
-    inline auto dataInputs(std::string name) const { return mNodeRegistry.at(name)->dataInputs(); }
+    inline auto dataInputs(const std::string name) const { return mNodeRegistry.at(name)->dataInputs(); }
 
     /**
-     * @brief List input Tensors of the graph input nodes.
+     * @brief List input connections of the GraphView object's inputNodes.
      * @return std::vector<std::pair<NodePtr, IOIndex_t>>
      */
     std::vector<std::pair<NodePtr, IOIndex_t>> inputs() const;
 
+    /**
+     * @brief List input connections of the specified GraphView object's inputNode.
+     * @return std::vector<std::pair<NodePtr, IOIndex_t>>
+     */
     std::vector<std::pair<NodePtr, IOIndex_t>> inputs(std::string name) const;
 
     /**
-     * @brief List output Tensors of the node.
-     * @return std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>
+     * @brief List output connections of the GraphView object's outputNodes.
+     * @return std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>
      */
     std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> outputs() const;
 
     /**
-     * @brief Specific i-th output Tensor of the GraphView.
+     * @brief Specific i-th output connection of the GraphView object.
      * @param nodeName Name of the Node of which to show the output.
      * @return std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>>
      */
     std::vector<std::vector<std::pair<NodePtr, IOIndex_t>>> outputs(
             std::string nodeName) const;
 
+    /**
+     * @brief Compute dimensions of input/output Tensors for each Operator of the
+     * GraphView object's Nodes.
+     */
     void forwardDims();
 
+    /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string &backend);
+    /** @brief Set the same datatype for each Operator of the GraphView object's Nodes. */
     void setDatatype(const DataType &datatype);
 
 ///////////////////////////////////////////////////////
@@ -159,39 +178,49 @@ public:
 ///////////////////////////////////////////////////////
 public:
     /**
-     * @brief Get the Parents of inputNodes.
-     * @return std::vector<NodePtr>
+     * @brief Get the parents Nodes of inputNodes.
+     * @return std::set<NodePtr>
      */
     std::set<NodePtr> getParents() const;
+    /**
+     * @brief Get parents Nodes of the specified Node.
+     * @param nodeName Name of the Node.
+     * @return std::vector<NodePtr> 
+     */
     std::vector<NodePtr> getParents(const std::string nodeName) const;
     std::vector<std::vector<NodePtr>> getOrderedParents() const;
 
     /**
-     * @brief Get the Children of outputNodes.
+     * @brief Get the children Nodes of outputNodes.
      * @return std::set<NodePtr>
      */
     std::set<NodePtr> getChildren() const;
+    /**
+     * @brief Get children Nodes of the specified Node.
+     * @param nodeName Name of the Node.
+     * @return std::vector<std::vector<NodePtr>>
+     */
     std::vector<std::vector<NodePtr>> getChildren(const std::string nodeName) const;
     std::set<NodePtr> getChildren(
             const NodePtr otherNode) const;  // TODO change it for a vector<vector> ?
 
     /**
-     * @brief Getter for Operators of the GraphView.
-     * @return std::set<NodePtr>
+     * @brief Get the Nodes pointed to by the GraphView object.
+     * @return std::set<NodePtr> 
      */
     inline std::set<NodePtr> getNodes() const { return mNodes; }
 
     /**
      * @brief Get the operator with the corresponding name if it is in the
      * GraphView.
-     * @param nodeName name of the node.
-     * @return NodePtr return a new empty node if the one asked for
+     * @param nodeName Name of the node.
+     * @return NodePtr returns a new empty node if the one asked for
      * was not found.
      */
     NodePtr getNode(const char *nodeName) const;
 
     /**
-     * @brief Remove a Node from the current GraphView scope without affecting its connections
+     * @brief Remove a Node from the current GraphView scope without affecting its connections.
      * @param nodePtr Node to remove
      * @param includeLearnableParam Whether learnable parameters should also be removed. Default true.
      */
@@ -202,12 +231,17 @@ public:
     void setInputId(IOIndex_t inID, IOIndex_t newNodeOutID);
 
     /**
-     * @brief Includes a Node to the current GraphView
-     * @param other_node Node to add.
-     * @param includeLearnableParam Should non-data inputs, like weights and biases
-     * be included in the GraphView automatically. Default: true.
+     * @brief Include a Node to the current GraphView object.
+     * @param otherNode Node to add.
+     * @param includeLearnableParam Include non-data inputs, like weights and biases 
+     * in the GraphView automatically. Default: true.
      */
     void add(NodePtr otherNode, bool includeLearnableParam = true);
+    /**
+     * @brief Include a set of Nodes to the current GraphView object.
+     * @param otherNodes 
+     * @param includeLearnableParam 
+     */
     void add(std::set<NodePtr> otherNodes,
              bool includeLearnableParam = true);
 
@@ -307,15 +341,27 @@ private:
 //        TENSOR MANAGEMENT
 ///////////////////////////////////////////////////////
 
-    IONb_t getNbDataInputs() const;
-
-    IONb_t getNbFreeDataInputs() const;
+    /**
+     * @brief Get the sum of the number of dataInput Nodes for all inputNodes of the GraphView object.
+     * @return IOIndex_t 
+     */
+    IOIndex_t getNbDataInputs() const;
 
+    /**
+     * @brief Get the sum of the number of free dataInput connection for all inputNodes of the GraphView object.
+     * @return IOIndex_t 
+     */
+    IOIndex_t getNbFreeDataInputs() const;
 
+    /**
+     * @brief Update the set of inputNodes with a new Node, checking if it can be
+     * added and removing any Node not part of mInputNode anymore.
+     * @param nodePtr
+     */
     void updateInputNodes(NodePtr node);
 
     /**
-     * @brief Update the set of output Nodes with a new Node,checking if it can be
+     * @brief Update the set of outputNodes with a new Node, checking if it can be
      * added and removing any Node not part of mOutputNode anymore.
      * @param nodePtr
      */
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index 977b0e4721bad9e83a9f3e04e5228bd25cf186da..c7473293e11527eb5c4fba7b39fcd7c6b2c19500 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -29,27 +29,29 @@ using NodePtr = std::shared_ptr<Node>;
 
 class GraphView;
 
+/**
+ * @brief Object carrying the topological information of the computational graph.
+ */
 class Node : public std::enable_shared_from_this<Node> {
 private:
-  std::string mName; // Name of the Node. Should be unique
-
-  std::set<std::shared_ptr<GraphView>> mViews =
-      std::set<std::shared_ptr<GraphView>>(); // Set of pointers to GraphView
-                                              // instances including this Node
-                                              // instance
-  const std::shared_ptr<Operator>
-      mOperator; // Pointer to the associated Operator
-
-  std::vector<NodePtr>
-      mParents; // List of parent nodes (Parent --> Node --> Child)
-  std::vector<std::vector<NodePtr>>
-      mChildren; // List of child nodes for each output (Parent --> Node -->
-                 // Child)
-  std::vector<std::vector<IOIndex_t>> mIdInChildren; // InID of Child node.
-  std::vector<IOIndex_t> mIdOutParents; // OutID of Parent node. Default: gk_IODefaultIndex.
+  std::string mName; /** Name of the Node. Should be unique. */
+
+  std::set<std::shared_ptr<GraphView>> mViews = std::set<std::shared_ptr<GraphView>>(); /** Set of pointers to GraphView instances including this Node instance. */
+  const std::shared_ptr<Operator> mOperator; // Pointer to the associated Operator
+
+  std::vector<NodePtr> mParents; /** List of parent node for each input (Parent --> Node --> Child) */
+  std::vector<std::vector<NodePtr>> mChildren; /** List of children nodes for each output (Parent --> Node --> Child) */
+  std::vector<std::vector<IOIndex_t>> mIdInChildren; /** List of input index for each Node linked to each output of the Node. */
+  std::vector<IOIndex_t> mIdOutParents; /** index of the output linked to each input of the Node. Default: gk_IODefaultIndex. */
 
 public:
   Node() = delete;
+
+  /**
+   * @brief Construct a new Node object associated with the input Operator.
+   * @param op Operator giving the Node its number of connections.
+   * @param name (optional) name for the Node.
+   */
   Node(std::shared_ptr<Operator> op, const char *name = nullptr);
 
   virtual ~Node() = default;
@@ -63,6 +65,11 @@ public:
   //        FUNCTIONAL DESCRIPTION
   ///////////////////////////////////////////////////////
 
+  /**
+   * @brief Functional operator for user-friendly connection interface using an ordered set of Connectors.
+   * @param ctors Ordered Connectors linking their associated Node to the input of the current Node with the same index.
+   * @return Connector 
+   */
   Connector operator()(const std::vector<Connector> ctors);
 
 public:
@@ -71,13 +78,13 @@ public:
   ///////////////////////////////////////////////////////
 
   /**
-   * @brief Name of the node.
+   * @brief Name of the Node.
    * @return std::string
    */
   inline std::string name() const noexcept { return mName; }
 
   /**
-   * @brief Set the node name.
+   * @brief Set the Node name.
    * @warning Undefined behaviour when several Nodes have the same name.
    * @param name New name for the node.
    */
@@ -114,7 +121,7 @@ public:
   ///////////////////////////////////////////////////////
 
   /**
-   * @brief Whether or not every input of the Node is linked to a Tensor.
+   * @brief Whether or not every input of the Node is linked to a Parent.
    * If true then the Node is ready to be executed.
    * @return true
    * @return false
@@ -141,33 +148,33 @@ public:
    * @param inID
    * @return std::pair<NodePtr, IOIndex_t>
    */
-  inline std::pair<NodePtr, IOIndex_t> input(IOIndex_t inID) const {
-    assert((inID != gk_IODefaultIndex) && (static_cast<IONb_t>(inID) < nbInputs()) && "Input index out of bound.");
-    return std::pair<NodePtr, IOIndex_t>(mParents[inID],
-                                                 mIdOutParents[inID]);
+  inline std::pair<NodePtr, IOIndex_t> input(const IOIndex_t inID) const {
+    assert((inID != gk_IODefaultIndex) && (inID < nbInputs()) && "Input index out of bound.");
+    return std::pair<NodePtr, IOIndex_t>(mParents[inID], mIdOutParents[inID]);
   }
 
   /**
    * @brief Set fix value for the specified input by creating a Producer wrapping the given Tensor.
    * 
-   * @param idx input index
-   * @param tensor constant tensor to add as parent for specified index.
+   * @param idx Input index.
+   * @param tensor Constant Tensor to add as parent for specified index.
    */
   void setInput(const IOIndex_t idx, const std::shared_ptr<Tensor> tensor);
 
   /**
-   * @brief Get the lowest index in the input Data Parent list equal to the
+   * @brief Get the lowest index in the InputData Parent list equal to the
    * nullptr.
    * @return std::size_t
    */
   inline IOIndex_t getFirstFreeDataInput() const {
     IOIndex_t i = 0;
-    for (; (static_cast<IONb_t>(i) < nbDataInputs()) && (input(i).second >= 0); ++i) {}
+    for (; (i < nbDataInputs()) && (input(i).second != gk_IODefaultIndex); ++i) {}
     // assert((i<nbDataInputs()) && "No free data input for Node");
-    return (static_cast<IONb_t>(i) < nbDataInputs()) ? i : gk_IODefaultIndex;
+    return (i < nbDataInputs()) ? i : gk_IODefaultIndex;
   }
 
-  IONb_t getNbFreeDataInputs() const;
+
+  IOIndex_t getNbFreeDataInputs() const;
 
   /**
    * @brief List input ids of children liked to outputs of the node
@@ -190,14 +197,14 @@ public:
    * @details [data, data, weight, bias] => 4
    * @return IOIndex_t
    */
-  inline IONb_t nbInputs() const noexcept { return getOperator()->nbInputs(); }
+  inline IOIndex_t nbInputs() const noexcept { return getOperator()->nbInputs(); }
 
   /**
    * @brief Number of input specifically for data
    * @details [data, data, weight, bias] => 2
    * @return IOIndex_t
    */
-  inline IONb_t nbDataInputs() const noexcept {
+  inline IOIndex_t nbDataInputs() const noexcept {
     return getOperator()->nbDataInputs();
   }
 
@@ -205,15 +212,15 @@ public:
    * @brief Number of inputs linked to a Parent's output.
    * @return IOIndex_t
    */
-  IONb_t nbValidInputs() const;
+  IOIndex_t nbValidInputs() const;
 
   /**
    * @brief Getter for the number of Output Tensors of the Node.
    * @return IOIndex_t
    */
-  inline IONb_t nbOutputs() const noexcept { return getOperator()->nbOutputs(); }
+  inline IOIndex_t nbOutputs() const noexcept { return getOperator()->nbOutputs(); }
 
-  IONb_t nbValidOutputs() const;
+  IOIndex_t nbValidOutputs() const;
 
   ///////////////////////////////////////////////////////
   //        TOPOLOGY
@@ -271,34 +278,52 @@ public:
                 std::pair<NodePtr, IOIndex_t>(nullptr, gk_IODefaultIndex));
 
   /**
-   * @brief Get the list of parent Nodes. As an input is linked to a unic Node,
-   * if non is linked then the parent is a nullptr.
+   * @brief Get the list of parent Nodes. As an input is linked to a unique Node,
+   * if none is linked then the parent is a nullptr.
    * @return std::vector<NodePtr>
    */
   std::vector<NodePtr> getParents() const;
 
-  inline NodePtr &getParents(IOIndex_t inID) {
-    assert(inID != gk_IODefaultIndex);
-    return mParents.at(inID);
+  /**
+   * @brief Get the pointer to parent of the specified input index. This pointer is nullptr if no parent is linked.
+   * @param inId Input index.
+   * @return NodePtr& 
+   */
+  inline NodePtr &getParents(const IOIndex_t inId) {
+    assert(inId != gk_IODefaultIndex);
+    return mParents.at(inId);
   }
 
-  NodePtr popParent(const IOIndex_t inID);
+  /**
+   * @brief Unlink the parent Node at the specified input index and return its pointer.
+   * Return a nullptr if no parent was linked.
+   * @param inId Input index.
+   * @return NodePtr 
+   */
+  NodePtr popParent(const IOIndex_t inId);
 
-  bool removeParent(const IOIndex_t inID);
+  bool removeParent(const IOIndex_t inId);
 
   /**
-   * @brief Get the Children object. Children do not include any nullptr as
-   * an output maybe linked to nobody and the Node would still work fine.
+   * @brief Get the set of pointers to children Nodes linked to the current Node object.
+   * @details The returned set does not include any nullptr as an output maybe linked to
+   * an undefined number of Nodes. It does not change the computation of its associated Operator.
    * @return std::set<NodePtr>>
    */
   std::set<NodePtr> getChildren() const;
 
   std::vector<std::vector<NodePtr>> getOrderedChildren() const;
 
-  std::vector<NodePtr> getChildren(IOIndex_t outID) const;
+  /**
+   * @brief Get the list of children Nodes linked to the output at specified index.
+   * @param outID Output index.
+   * @return std::vector<NodePtr> 
+   */
+  std::vector<NodePtr> getChildren(const IOIndex_t outID) const;
 
   /**
-   * @brief Remove registered child from children lists if possible.
+   * @brief Remove registered child from children list of specified output if possible.
+   * If so, also remove current Node from child Node from parent.
    * @param nodePtr Node to remove.
    * @param outId Output index. Default 0.
    * @return true Child found and removed for given output index.
@@ -316,44 +341,50 @@ private:
   //        OPERATORS
   ///////////////////////////////////////////////////////
 
+  // cannot change operator for now
   // void setOperator(const std::shared_ptr<Operator> op_ptr);
 
   ///////////////////////////////////////////////////////
   //        TENSOR MANAGEMENT
   ///////////////////////////////////////////////////////
 
-  void setInputId(IOIndex_t inID, IOIndex_t newNodeOutID);
+  /**
+   * @brief Set the output index of the parent linked to the specified input.
+   * @param inID Input index to update.
+   * @param newNodeOutID Output index of the parent Node providing this input.
+   */
+  void setInputId(const IOIndex_t inID, const IOIndex_t newNodeOutID);
 
   ///////////////////////////////////////////////////////
   //        TOPOLOGY
   ///////////////////////////////////////////////////////
 
   /**
-   * @brief add function specialized in adding Nodes.
-   * @param other_node
-   * @param outID
-   * @param other_inID
+   * @brief Add the given Node as a child for the current Node.
+   * @param otherNode Node to add as a child.
+   * @param outId Output index of the current Node to connect from.
+   * @param otherInId Input index of the child Node to connect to.
    */
-  void addChildOp(NodePtr other_node, const IOIndex_t outID,
-                  IOIndex_t other_inID);
+  void addChildOp(NodePtr otherNode, const IOIndex_t outId,
+                  const IOIndex_t otherInId);
 
   /**
-   * @brief add functon specialized in adding GraphView.
-   *
-   * @param other_graph
-   * @param outID
-   * @param other_inID
+   * @brief Add the given GraphView's input Node as a child for the current Node
+   * @param otherGraph 
+   * @param outId 
+   * @param otherInId pointer to the GraphView's input Node and its input index. Defaults to the
+   * GraphView's only input Node if it has exactly one.
    */
-  void addChildView(std::shared_ptr<GraphView> other_graph,
-                    const IOIndex_t outID,
-                    std::pair<NodePtr, IOIndex_t> other_inID);
+  void addChildView(std::shared_ptr<GraphView> otherGraph,
+                    const IOIndex_t outId,
+                    std::pair<NodePtr, IOIndex_t> otherInId);
 
   /**
    * @brief Add a Node to the list of parents.
-   * @param other_node Node to add to parents list.
-   * @param inID index for adding the parent.
+   * @param otherNode Node to add to parents list.
+   * @param inId index for adding the parent.
    */
-  void addParent(const NodePtr other_node, const IOIndex_t inID);
+  void addParent(const NodePtr otherNode, const IOIndex_t inId);
 };
 } // namespace Aidge
 
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 13ebcd48cb93c98944b20522b2314d928b7d22fe..36e592682e61fbc178ed4623f88e9fa5f446f25d 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -133,9 +133,9 @@ public:
         }
     }
 
-    inline IONb_t nbInputs() const noexcept override final { return NUM; }
-    inline IONb_t nbDataInputs() const noexcept override final { return NUM; }
-    inline IONb_t nbOutputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbInputs() const noexcept override final { return NUM; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return NUM; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
 template <std::size_t NUM>
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index e79640842e374d97c2cb982b2081f2fbdcbd0645..a86942d14e531e5974c8924d8dafb8a4d0bebf85 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -133,9 +133,9 @@ public:
         mInput->setDatatype(datatype);
     }
 
-    inline IONb_t nbInputs() const noexcept override final { return 1; }
-    inline IONb_t nbDataInputs() const noexcept override final { return 1; }
-    inline IONb_t nbOutputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 2c11fa2d2abba606d1bfbcb17ff0436dba012385..6c64ae44c04f9a8f37d0dde14b251da94ce72a3f 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -134,9 +134,9 @@ public:
         mInputs[4]->setDatatype(datatype);
     }
 
-    inline IONb_t nbInputs() const noexcept override final { return 5; }
-    inline IONb_t nbDataInputs() const noexcept override final { return 1; }
-    inline IONb_t nbOutputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbInputs() const noexcept override final { return 5; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 2620d6261b26663afd0dd527547eaab1df1d521d..babeac443dd8d51a8b9d3de5a2e96b8745636060 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -155,9 +155,9 @@ public:
         mInputs[2]->setDatatype(datatype);
     }
 
-    inline IONb_t nbInputs() const noexcept override final { return 3; }
-    inline IONb_t nbDataInputs() const noexcept override final { return 1; }
-    inline IONb_t nbOutputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbInputs() const noexcept override final { return 3; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index ad9e1951237d7c7a20419a12abc3ba668b397c19..7cbc609798064e993c7744fdf08865d897518a89 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -156,9 +156,9 @@ class ConvDepthWise_Op : public Operator,
         mInputs[2]->setDatatype(datatype);
     }
 
-    inline IONb_t nbInputs() const noexcept override final { return 3; }
-    inline IONb_t nbDataInputs() const noexcept override final { return 1; }
-    inline IONb_t nbOutputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbInputs() const noexcept override final { return 3; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 9b36640af717547baf0f20d06a700129c07f8f47..ebd3a8826dbca292b57f4d3cae749f4e1d7968c8 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -132,9 +132,9 @@ public:
     }
 
 
-    inline IONb_t nbInputs() const noexcept override final { return 3; }
-    inline IONb_t nbDataInputs() const noexcept override final { return 1; }
-    inline IONb_t nbOutputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbInputs() const noexcept override final { return 3; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
 inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const char* name = nullptr) {
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index b8b86521a2fa925cfc04adf0a313e518473d9cab..254d62c6bdff89dd28079245adf0b2559cca66f8 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -29,14 +29,14 @@ class GenericOperator_Op
       public Registrable<GenericOperator_Op, std::string, std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)> {
    private:
     CParameter mParams;
-    IONb_t mNbDataIn;
-    IONb_t mNbIn;
-    IONb_t mNbOut;
+    IOIndex_t mNbDataIn;
+    IOIndex_t mNbIn;
+    IOIndex_t mNbOut;
     std::vector<std::shared_ptr<Tensor>> mInputs;
     std::vector<std::shared_ptr<Tensor>> mOutputs;
 
    public:
-    GenericOperator_Op(const char *type, IONb_t nbDataIn, IONb_t nbIn, IONb_t nbOut)
+    GenericOperator_Op(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut)
         : Operator(type), mNbDataIn(nbDataIn), mNbIn(nbIn), mNbOut(nbOut)
     {
         mInputs = std::vector<std::shared_ptr<Tensor>>(nbIn);
@@ -141,9 +141,9 @@ class GenericOperator_Op
     void forward() override final { printf("forward: not available yet.\n"); }
     void backward() override final { printf("backward: not available yet.\n"); }
 
-    inline IONb_t nbInputs() const noexcept override final { return mNbIn; };
-    inline IONb_t nbDataInputs() const noexcept override final { return mNbDataIn; };
-    inline IONb_t nbOutputs() const noexcept override final { return mNbOut; };
+    inline IOIndex_t nbInputs() const noexcept override final { return mNbIn; };
+    inline IOIndex_t nbDataInputs() const noexcept override final { return mNbDataIn; };
+    inline IOIndex_t nbOutputs() const noexcept override final { return mNbOut; };
 };
 
 /**
@@ -156,7 +156,7 @@ class GenericOperator_Op
  * @param name (optional) name of the Operator.
  * @return std::shared_ptr<Node> Node associated with the Generic Operator.
  */
-inline std::shared_ptr<Node> GenericOperator(const char *type, IONb_t nbDataIn, IONb_t nbIn, IONb_t nbOut,
+inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut,
                                              const char *name = nullptr) {
     return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbDataIn, nbIn, nbOut), name);
 }
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 4793d9ca69dcb2b11fe4b83b4871c5b6becff358..ed967001a23a6b9dd4cfe5db09ec4f1edd60e5ea 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -107,9 +107,9 @@ public:
         mInput->setDatatype(datatype);
     }
 
-    inline IONb_t nbInputs() const noexcept override final { return 1; }
-    inline IONb_t nbDataInputs() const noexcept override final { return 1; }
-    inline IONb_t nbOutputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
 inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const char* name = nullptr) {
diff --git a/include/aidge/operator/Matmul.hpp b/include/aidge/operator/Matmul.hpp
index 1d8174775a13074b3d39751267828ea3b473f59e..a871fe516c95802fdb67e81ca3f58fb3be4dce25 100644
--- a/include/aidge/operator/Matmul.hpp
+++ b/include/aidge/operator/Matmul.hpp
@@ -122,9 +122,9 @@ public:
     }
 
 
-    inline IONb_t nbInputs() const noexcept override final { return 2; }
-    inline IONb_t nbDataInputs() const noexcept override final { return 1; }
-    inline IONb_t nbOutputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbInputs() const noexcept override final { return 2; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
 inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const char* name = nullptr) {
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index bb20177b106f81c47723020e1d674fbc5b3f7974..9f24ce884863776f6856ee03fb4feb089e6323e2 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -90,9 +90,9 @@ public:
         return mType;
     }
 
-    virtual IONb_t nbInputs() const noexcept = 0;
-    virtual IONb_t nbDataInputs() const noexcept = 0;
-    virtual IONb_t nbOutputs() const noexcept = 0;
+    virtual IOIndex_t nbInputs() const noexcept = 0;
+    virtual IOIndex_t nbDataInputs() const noexcept = 0;
+    virtual IOIndex_t nbOutputs() const noexcept = 0;
 };
 } // namespace Aidge
 
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 5ce01ee38d6477718ee4205c522e51661883c02a..4d5461957826e9ebea4a39bb9a7618604e80797a 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -60,7 +60,10 @@ public:
     constexpr bool outputDimsForwarded() const override final {return true;}
 
 
-    inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final { assert(false); }
+    [[noreturn]] inline Tensor& input(__attribute__((unused)) const IOIndex_t inputIdx) const override final {
+      assert(false);
+      exit(-1);
+    }
     inline Tensor& output(__attribute__((unused)) const IOIndex_t outputIdx) const override final { return *(mOutput.get()); }
 
 
@@ -94,9 +97,9 @@ public:
         mOutput->setDatatype(datatype);
     }
 
-    inline IONb_t nbInputs() const noexcept override final { return 0; };
-    inline IONb_t nbDataInputs() const noexcept override final { return 0; };
-    inline IONb_t nbOutputs() const noexcept override final { return 1; };
+    inline IOIndex_t nbInputs() const noexcept override final { return 0; };
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 0; };
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; };
 
 public:
   void forward() override final {
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 0c8c744b4ed331668b1ff64bc9c03621f5fbfb9b..93bc9a74091c2893dc7b1f7fcc34c72828f34f27 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -96,9 +96,9 @@ public:
         mInput->setDatatype(datatype);
     }
 
-    inline IONb_t nbInputs() const noexcept override final { return 1; }
-    inline IONb_t nbDataInputs() const noexcept override final { return 1; }
-    inline IONb_t nbOutputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
 inline std::shared_ptr<Node> ReLU(const char* name = nullptr) {
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index b430c138548344a0fcd4ea1dcd742c3d38288cfe..9be2acde8570bdc250054e9bed7a1b0d5c3e52ff 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -96,9 +96,9 @@ public:
         mInput->setDatatype(datatype);
     }
 
-    inline IONb_t nbInputs() const noexcept override final { return 1; }
-    inline IONb_t nbDataInputs() const noexcept override final { return 1; }
-    inline IONb_t nbOutputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
+    inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
 inline std::shared_ptr<Node> Softmax(const char* name = nullptr) {
diff --git a/include/aidge/utils/Types.h b/include/aidge/utils/Types.h
index f626c635287f6b0d859dee163601c41e236d4bc4..d05c64ead0e147a8d66c7f40dbd978283401683a 100644
--- a/include/aidge/utils/Types.h
+++ b/include/aidge/utils/Types.h
@@ -47,14 +47,14 @@ constexpr DimIdx_t MaxDim = std::numeric_limits<DimIdx_t>::max();
 ///\brief Signed integral type to hold an IO index.
 ///\details <0 values reserved
 ///\todo Change it for an unsigned value with default to numeric_limit and max to numeric_limit-1
-using IOIndex_t = std::make_signed<std::uint16_t>::type;
+using IOIndex_t = std::uint16_t;
 /// @brief Default for absence of connection
-constexpr IOIndex_t gk_IODefaultIndex = -1;
-constexpr IOIndex_t gk_IOMaxIndex = std::numeric_limits<IOIndex_t>::max();
+constexpr IOIndex_t gk_IODefaultIndex = std::numeric_limits<IOIndex_t>::max();
+constexpr IOIndex_t gk_IOMaxIndex = std::numeric_limits<IOIndex_t>::max() - 1;
 
-///\brief Number of input/output connections for a Node/Operator
-using IONb_t = std::uint16_t;
-constexpr IONb_t gk_IOMaxNb = std::numeric_limits<IONb_t>::max();
+// ///\brief Number of input/output connections for a Node/Operator
+// using IOIndex_t = std::uint16_t;
+// constexpr IOIndex_t gk_IOMaxNb = std::numeric_limits<IOIndex_t>::max();
 
 
 } // namespace Aidge
diff --git a/src/graph/Connector.cpp b/src/graph/Connector.cpp
index ca35b38ad20584c56dad0c178ee90ab94a14cd29..f189b92b24cc5529ae8fb6d8c9faac97e296a92c 100644
--- a/src/graph/Connector.cpp
+++ b/src/graph/Connector.cpp
@@ -24,7 +24,7 @@ Aidge::Connector::Connector(std::shared_ptr<Aidge::Node> node) {
     }
 }
 
-Aidge::IONb_t Aidge::Connector::size() const { return mNode->nbOutputs(); }
+Aidge::IOIndex_t Aidge::Connector::size() const { return mNode->nbOutputs(); }
 
 std::shared_ptr<Aidge::GraphView> Aidge::generateGraph(std::vector<Connector> ctors) {
     std::shared_ptr<GraphView> graph = std::make_shared<GraphView>();
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index e0fa89237e6f98552120c8f50f6ddb64fa70f5ab..afbc68c79c04f0cb783f55f043f991f6bd71a709 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -106,17 +106,17 @@ void Aidge::GraphView::save(std::string path, bool verbose) const {
 //        TENSOR MANAGEMENT
 ///////////////////////////////////////////////////////
 
-Aidge::IONb_t Aidge::GraphView::getNbDataInputs() const {
-  IONb_t nbDataInput = static_cast<IONb_t>(0);
-  assert(outputNodes().size() == static_cast<std::size_t>(1));
+Aidge::IOIndex_t Aidge::GraphView::getNbDataInputs() const {
+  IOIndex_t nbDataInput = 0;
+  // assert(outputNodes().size() == static_cast<std::size_t>(1));
   for (const std::shared_ptr<Node> &inNode : inputNodes()) {
     nbDataInput += inNode->nbDataInputs();
   }
   return nbDataInput;
 }
 
-Aidge::IONb_t Aidge::GraphView::getNbFreeDataInputs() const {
-  IONb_t nbIn = 0;
+Aidge::IOIndex_t Aidge::GraphView::getNbFreeDataInputs() const {
+  IOIndex_t nbIn = 0;
   for (const std::shared_ptr<Node> inputNode : mInputNodes) {
     nbIn += inputNode->getNbFreeDataInputs();
   }
@@ -126,7 +126,7 @@ Aidge::IONb_t Aidge::GraphView::getNbFreeDataInputs() const {
 
 std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>
 Aidge::GraphView::dataInputs() const {
-  IONb_t nbDataIn = 0U;
+  IOIndex_t nbDataIn = 0U;
   for (const std::shared_ptr<Node> inputNode : mInputNodes) {
     nbDataIn += inputNode->nbDataInputs();
   }
@@ -178,7 +178,7 @@ void Aidge::GraphView::forwardDims() {
     // Link every tensor to the right pointer
     // following parent - children informations
     for (std::shared_ptr<Node> nodePtr : getNodes()) {
-        for (IOIndex_t i = 0; static_cast<IONb_t>(i) < nodePtr->nbInputs(); ++i) {
+        for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
             // assess if the input was not already set and is a Tensor then link it to parent output
             std::pair<std::shared_ptr<Node>, IOIndex_t> inputI = nodePtr->input(i);
             if (inputI.first) {
@@ -323,7 +323,7 @@ void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnablePara
     mNodeRegistry.insert(std::make_pair(node->name(), node));
   // add learnable parameters to the graph
   if (includeLearnableParam) {
-    for (IONb_t i = node->nbDataInputs(); i < node->nbInputs(); ++i) {
+    for (IOIndex_t i = node->nbDataInputs(); i < node->nbInputs(); ++i) {
       std::shared_ptr<Node> parentNode = node->getParents(static_cast<IOIndex_t>(i));
       if (parentNode) {
           parentNode->addView(shared_from_this());
@@ -483,7 +483,7 @@ void Aidge::GraphView::remove(std::shared_ptr<Node> nodePtr, bool includeLearnab
   // same for learnable params
 
   if (includeLearnableParam) {
-    for (IONb_t i = nodePtr->nbDataInputs(); i < nodePtr->nbInputs(); ++i) {
+    for (IOIndex_t i = nodePtr->nbDataInputs(); i < nodePtr->nbInputs(); ++i) {
       auto inputI = nodePtr->input(i);
       bool removeNode = true;
       for (const auto& parentOutput : inputI.first->outputs()) {
@@ -576,7 +576,7 @@ bool Aidge::GraphView::replaceWith(std::set<std::shared_ptr<Node>> newNodes) {
 
     // copy output connections
     if (newOutputNode) {
-      for (IONb_t o = 0; o < previousOutputNode->nbOutputs(); ++o) {
+      for (IOIndex_t o = 0; o < previousOutputNode->nbOutputs(); ++o) {
         auto outputPairs = copyOutputs[o];
         for (const auto& onePair : outputPairs) {
           newOutputNode->addChild(onePair.first, o, onePair.second);
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 6104d49dd121ffe885c077e72358684319eabb76..5568e4b599195f50450bcb715c6e03e034c1ceb2 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -72,7 +72,7 @@ void Aidge::Node::backward() {
 ///////////////////////////////////////////////////////
 
 bool Aidge::Node::valid() const {
-    for (IOIndex_t i = 0; static_cast<IONb_t>(i) < nbInputs(); ++i) {
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
         if (mIdOutParents[static_cast<std::size_t>(i)] == gk_IODefaultIndex) {
             return false;
         }
@@ -80,10 +80,10 @@ bool Aidge::Node::valid() const {
     return true;
 }
 
-Aidge::IONb_t Aidge::Node::getNbFreeDataInputs() const {
-    IONb_t nbFreeDataIn = 0;
-    for (IOIndex_t i = 0; static_cast<IONb_t>(i) < nbInputs(); ++i) {
-        if (input(i).second < 0) {
+Aidge::IOIndex_t Aidge::Node::getNbFreeDataInputs() const {
+    IOIndex_t nbFreeDataIn = 0;
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (input(i).second == gk_IODefaultIndex) {
             ++nbFreeDataIn;
         }
     }
@@ -111,7 +111,7 @@ std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>> Aidge::No
 }
 
 void Aidge::Node::setInput(const Aidge::IOIndex_t idx, const std::shared_ptr<Aidge::Tensor> tensor) {
-    assert((idx != gk_IODefaultIndex) && (static_cast<IONb_t>(idx) < nbInputs()) && "Parent index out of bound.");
+    assert(((idx != gk_IODefaultIndex) && (idx < nbInputs())) && "Parent index out of bound.");
     if (mParents[idx] != nullptr) {
         mParents[idx]->removeChild(shared_from_this(), mIdOutParents[idx]);
         removeParent(idx);
@@ -144,16 +144,16 @@ Aidge::Node::output(Aidge::IOIndex_t outID) const {
     return listOutputs;
 }
 
-Aidge::IONb_t Aidge::Node::nbValidInputs() const {
-    IONb_t counter = 0;
-    for (IONb_t i = 0; i < nbInputs(); ++i) {
-        if (mIdOutParents[static_cast<std::size_t>(i)] < 0) ++counter;
+Aidge::IOIndex_t Aidge::Node::nbValidInputs() const {
+    IOIndex_t counter = 0;
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (mIdOutParents[static_cast<std::size_t>(i)] == gk_IODefaultIndex) ++counter;
     }
     return counter;
 }
 
-Aidge::IONb_t Aidge::Node::nbValidOutputs() const {
-    IONb_t counter = 0;
+Aidge::IOIndex_t Aidge::Node::nbValidOutputs() const {
+    IOIndex_t counter = 0;
     if (mIdInChildren.size() == 0) return 0;
     for (std::size_t i = 0; i < nbOutputs(); ++i) {
         if (mIdInChildren[i].size() > 0U) counter++;
@@ -161,8 +161,8 @@ Aidge::IONb_t Aidge::Node::nbValidOutputs() const {
     return counter;
 }
 
-void Aidge::Node::setInputId(IOIndex_t inId, IOIndex_t newNodeOutID) {
-    assert(inId != gk_IODefaultIndex && (static_cast<IONb_t>(inId) < nbInputs()) && "Must be a valid index");
+void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeOutID) {
+    assert(inId != gk_IODefaultIndex && (inId < nbInputs()) && "Must be a valid index");
     if (mIdOutParents[inId] != gk_IODefaultIndex) {
         std::printf("Warning: filling a Tensor already attributed\n");
         auto originalParent = input(inId);
@@ -178,11 +178,11 @@ void Aidge::Node::setInputId(IOIndex_t inId, IOIndex_t newNodeOutID) {
 // TOPOLOGY
 ///////////////////////////////////////////////////////
 
-void Aidge::Node::addChildOp(std::shared_ptr<Node> otherNode, const IOIndex_t outId, IOIndex_t otherInId) {
-    assert((otherInId != gk_IODefaultIndex) && (static_cast<IONb_t>(otherInId) < otherNode->nbInputs()) &&
+void Aidge::Node::addChildOp(std::shared_ptr<Node> otherNode, const IOIndex_t outId, const IOIndex_t otherInId) {
+    assert((otherInId != gk_IODefaultIndex) && (otherInId < otherNode->nbInputs()) &&
            "Input index out of bound.");
-    assert((outId != gk_IODefaultIndex) && (static_cast<IONb_t>(outId) < nbOutputs()) && "Output index out of bound.");
-    if (otherNode->input(otherInId).second >= 0) {
+    assert((outId != gk_IODefaultIndex) && (outId < nbOutputs()) && "Output index out of bound.");
+    if (otherNode->input(otherInId).second != gk_IODefaultIndex) {
         std::printf("Warning, the %d-th Parent of the child node already existed.\n", otherInId);
     }
     // manage tensors and potential previous parent
@@ -197,9 +197,9 @@ void Aidge::Node::addChildOp(std::shared_ptr<Node> otherNode, const IOIndex_t ou
 void Aidge::Node::addChildView(std::shared_ptr<GraphView> other_graph, const IOIndex_t outID,
                               std::pair<std::shared_ptr<Node>, IOIndex_t> otherInId) {
     assert((otherInId.second != gk_IODefaultIndex) &&
-           (static_cast<IONb_t>(otherInId.second) < otherInId.first->nbInputs()) &&
+           (otherInId.second < otherInId.first->nbInputs()) &&
            "Other graph input index out of bound.");
-    assert((outID != gk_IODefaultIndex) && (static_cast<IONb_t>(outID) < nbOutputs()) && "Output index out of bound.");
+    assert((outID != gk_IODefaultIndex) && (outID < nbOutputs()) && "Output index out of bound.");
     std::set<std::shared_ptr<Node>> inNodes = other_graph->inputNodes();
     if (inNodes.size() == std::size_t(0)) {  // no input Node
         printf("Cannot add GraphView to the Node. No input node detected.\n");
@@ -211,7 +211,7 @@ void Aidge::Node::addChildView(std::shared_ptr<GraphView> other_graph, const IOI
 }
 
 void Aidge::Node::addChild(std::shared_ptr<Node> otherNode, const IOIndex_t outId, IOIndex_t otherInId) {
-    otherInId = (otherInId >= 0) ? otherInId : otherNode->getFirstFreeDataInput();
+    otherInId = (otherInId != gk_IODefaultIndex) ? otherInId : otherNode->getFirstFreeDataInput();
     addChildOp(otherNode, outId, otherInId);
 }
 
@@ -223,7 +223,7 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t
                "Node is not explicit.");
         otherInId.first = *(otherView->inputNodes().begin());
     }
-    otherInId.second = (otherInId.second >= 0) ? otherInId.second : otherInId.first->getFirstFreeDataInput();
+    otherInId.second = (otherInId.second != gk_IODefaultIndex) ? otherInId.second : otherInId.first->getFirstFreeDataInput();
     addChildView(otherView, outId, otherInId);
 }
 
@@ -231,21 +231,21 @@ void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOInde
     if (getParents(inId) != nullptr) {
         printf("Warning, you're replacing a Parent.\n");
     }
-    assert((inId != gk_IODefaultIndex) && (static_cast<IONb_t>(inId) < nbInputs()) && "Input index out of bound.");
+    assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Input index out of bound.");
     mParents[inId] = other_node;
 }
 
 std::vector<std::shared_ptr<Aidge::Node>> Aidge::Node::getParents() const { return mParents; }
 
 std::shared_ptr<Aidge::Node> Aidge::Node::popParent(const IOIndex_t inId) {
-    assert((inId != gk_IODefaultIndex) && (static_cast<IONb_t>(inId) < nbInputs()) && "Input index out of bound.");
+    assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Input index out of bound.");
     std::shared_ptr<Node> val = mParents[inId];
     removeParent(inId);
     return val;
 }
 
 bool Aidge::Node::removeParent(const IOIndex_t inId) {
-    assert((inId != gk_IODefaultIndex) && (static_cast<IONb_t>(inId) < nbInputs()) && "Parent index out of bound.");
+    assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Parent index out of bound.");
     if (mParents[inId]) {
         mParents[inId] = nullptr;
         mIdOutParents[inId] = gk_IODefaultIndex;
@@ -264,13 +264,13 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::Node::getChildren() const {
 
 std::vector<std::vector<std::shared_ptr<Aidge::Node>>> Aidge::Node::getOrderedChildren() const { return mChildren; }
 
-std::vector<std::shared_ptr<Aidge::Node>> Aidge::Node::getChildren(IOIndex_t outID) const {
-    assert((outID != gk_IODefaultIndex) && (static_cast<IONb_t>(outID) < nbOutputs()) && "Output index out of bound.");
+std::vector<std::shared_ptr<Aidge::Node>> Aidge::Node::getChildren(const IOIndex_t outID) const {
+    assert((outID != gk_IODefaultIndex) && (outID < nbOutputs()) && "Output index out of bound.");
     return mChildren[outID];
 }
 
 bool Aidge::Node::removeChild(const std::shared_ptr<Aidge::Node> nodePtr, const Aidge::IOIndex_t outId) {
-    assert((outId != gk_IODefaultIndex) && (static_cast<IONb_t>(outId) < nbOutputs()) && "Child index out of bound.");
+    assert((outId != gk_IODefaultIndex) && (outId < nbOutputs()) && "Child index out of bound.");
     bool removed = false;
     for (std::size_t j = 0; j < mChildren[outId].size(); ++j) {
         if (mChildren[outId][j] == nodePtr) {
@@ -285,8 +285,8 @@ bool Aidge::Node::removeChild(const std::shared_ptr<Aidge::Node> nodePtr, const
 
 void Aidge::Node::resetConnections(bool includeLearnableParam) {
     // remove every parents reference to it
-    IONb_t nbRemovedInputs = includeLearnableParam ? nbInputs() : nbDataInputs();
-    for (IOIndex_t i = 0; static_cast<IONb_t>(i) < nbRemovedInputs; ++i) {
+    IOIndex_t nbRemovedInputs = includeLearnableParam ? nbInputs() : nbDataInputs();
+    for (IOIndex_t i = 0; i < nbRemovedInputs; ++i) {
         std::pair<std::shared_ptr<Node>, IOIndex_t> parent = input(i);
         if (parent.first) {
             // number of children linked to the parent's output
@@ -297,7 +297,7 @@ void Aidge::Node::resetConnections(bool includeLearnableParam) {
         mParents[i] = nullptr;
         mIdOutParents[i] = gk_IODefaultIndex;
     }
-    for (IOIndex_t i = 0; static_cast<IONb_t>(i) < nbOutputs(); ++i) {
+    for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
         for (std::pair<std::shared_ptr<Node>, IOIndex_t> child : output(i)) {
             child.first->removeParent(child.second);
         }
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 24953ccb0454c91eb5894552c8c814d304a3638c..fce46397ffd286a2ddbe254752b241578415e3d8 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -80,14 +80,14 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
                        "\x1b[0m"
                        "\n\t\tR/C:\t",
                        (consumer->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(consumer.get()))).c_str());
-                for (IOIndex_t inId = 0; static_cast<IONb_t>(inId) < consumer->nbInputs() - 1; ++inId) {
+                for (IOIndex_t inId = 0; inId < consumer->nbInputs() - 1; ++inId) {
                     printf("%ld/%ld\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
                            consumer->getOperator()->getNbRequiredData(inId));
                 }
                 printf("%ld/%ld", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
                        consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1));
                 printf("\n\t\tP:\t");
-                for (IOIndex_t outId = 0; static_cast<IONb_t>(outId) < consumer->nbOutputs() - 1; ++outId) {
+                for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) {
                     printf("%ld\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
                 }
                 printf("%ld", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
@@ -138,14 +138,14 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
             if (verbose) {
                 printf("\t- consumer: %s\n\t\tR/C:\t",
                        (consumer->type() + "_" + std::to_string(reinterpret_cast<uintptr_t>(consumer.get()))).c_str());
-                for (IOIndex_t inId = 0; static_cast<IONb_t>(inId) < consumer->nbInputs() - 1; ++inId) {
+                for (IOIndex_t inId = 0; inId < consumer->nbInputs() - 1; ++inId) {
                     printf("%ld/%ld\n\t\t\t", consumer->getOperator()->getNbConsumedData(inId),
                            consumer->getOperator()->getNbRequiredData(inId));
                 }
                 printf("%ld/%ld", consumer->getOperator()->getNbConsumedData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1),
                        consumer->getOperator()->getNbRequiredData(static_cast<IOIndex_t>(consumer->nbInputs()) - 1));
                 printf("\n\t\tP:\t");
-                for (IOIndex_t outId = 0; static_cast<IONb_t>(outId) < consumer->nbOutputs() - 1; ++outId) {
+                for (IOIndex_t outId = 0; outId < consumer->nbOutputs() - 1; ++outId) {
                     printf("%ld\n\t\t\t", consumer->getOperator()->getNbProducedData(outId));
                 }
                 printf("%ld", consumer->getOperator()->getNbProducedData(static_cast<IOIndex_t>(consumer->nbOutputs()) - 1));
@@ -166,7 +166,7 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
             }
 
             bool computationOverForConsumer = true;
-            for (IOIndex_t parentIDi = 0; static_cast<IONb_t>(parentIDi) < consumer->nbInputs(); ++parentIDi) {
+            for (IOIndex_t parentIDi = 0; parentIDi < consumer->nbInputs(); ++parentIDi) {
                 if (consumer->getOperator()->getNbConsumedData(parentIDi) <
                     consumer->getOperator()->getNbRequiredData(parentIDi)) {
                     computationOverForConsumer = false;
@@ -177,7 +177,7 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
                 computationOver.insert(consumer);
             }
 
-            for (IOIndex_t outId = 0; static_cast<IONb_t>(outId) < consumer->nbOutputs(); ++outId) {
+            for (IOIndex_t outId = 0; outId < consumer->nbOutputs(); ++outId) {
                 if (consumer->getOperator()->getNbProducedData(outId) > 0) {
                     if (verbose) printf("  also producer\n");
                     // make sure consumer is also a producer
diff --git a/unit_tests/graph/Test_Connector.cpp b/unit_tests/graph/Test_Connector.cpp
index b82c9af05d88111d424c22f92e441058ea870ff9..ef70521d0552f87a9f293ea03ef99bcfed7c13f2 100644
--- a/unit_tests/graph/Test_Connector.cpp
+++ b/unit_tests/graph/Test_Connector.cpp
@@ -19,7 +19,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("Connector Creation", "[Connector]") {
+TEST_CASE("[core/graph] Connector(Constructor)") {
     SECTION("Empty") {
         Connector x = Connector();
         REQUIRE(x.index() == gk_IODefaultIndex);
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index 9fb7a10029e212f4071d2f25f86deead078902a1..dc693193c6606c99b1628d23ad253015f8f8dbe6 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -25,14 +25,14 @@
 
 using namespace Aidge;
 
-TEST_CASE("[aidge/_CORE/graph] GraphView(Constructor)") {
+TEST_CASE("[core/graph] GraphView(Constructor)") {
     std::shared_ptr<GraphView> g0 = std::make_shared<GraphView>();
     std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("G1");
     REQUIRE(g0 != nullptr);
     REQUIRE(g1 != nullptr);
 }
 
-TEST_CASE("[aidge/_CORE/graph] GraphView(add)") {
+TEST_CASE("[core/graph] GraphView(add)") {
     SECTION("Node alone") {
         std::shared_ptr<GraphView> g = std::make_shared<GraphView>("TestGraph");
         std::shared_ptr<Node> GOp1 = GenericOperator("Fictive", 0, 0, 0, "Gop1");
@@ -93,7 +93,7 @@ TEST_CASE("[aidge/_CORE/graph] GraphView(add)") {
     }
 }
 
-TEST_CASE("[aidge/_CORE/graph] GraphView(addChild)") {
+TEST_CASE("[core/graph] GraphView(addChild)") {
     std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
     std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
     std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
@@ -158,7 +158,7 @@ TEST_CASE("[aidge/_CORE/graph] GraphView(addChild)") {
     REQUIRE(g1->getChildren(conv3) == std::set<std::shared_ptr<Node>>({conv3_5}));
 }
 
-TEST_CASE("[aidge/_CORE/graph] GraphView(inputs)") {
+TEST_CASE("[core/graph] GraphView(inputs)") {
     auto g1 = std::make_shared<GraphView>("TestGraph");
     std::shared_ptr<Node> conv = Conv(3, 32, {3, 3});
     g1->add(conv);
@@ -166,7 +166,7 @@ TEST_CASE("[aidge/_CORE/graph] GraphView(inputs)") {
     REQUIRE(g1->inputs() == conv->inputs());
 }
 
-TEST_CASE("[aidge/_CORE/graph] GraphView(outputs)") {
+TEST_CASE("[core/graph] GraphView(outputs)") {
     std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
     std::shared_ptr<Node> conv = Conv(3, 32, {3, 3});
     g1->add(conv);
@@ -174,7 +174,7 @@ TEST_CASE("[aidge/_CORE/graph] GraphView(outputs)") {
     REQUIRE(g1->outputs() == conv->outputs());
 }
 
-TEST_CASE("[aidge/_CORE/graph] GraphView(save)") {
+TEST_CASE("[core/graph] GraphView(save)") {
     std::shared_ptr<GraphView> g1 = std::make_shared<GraphView>("TestGraph");
     std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
     std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 1, 1, "c1");
@@ -194,7 +194,7 @@ TEST_CASE("[aidge/_CORE/graph] GraphView(save)") {
     printf("File saved in ./graphExample.md\n");
 }
 
-TEST_CASE("[aidge/_CORE/graph] GraphView(resetConnections)") {
+TEST_CASE("[core/graph] GraphView(resetConnections)") {
     SECTION("disconnect data iput") {
         std::shared_ptr<Node> conv = GenericOperator("Conv", 1, 1, 1, "c");
         std::shared_ptr<Node> conv1 = GenericOperator("Conv", 1, 3, 1, "c1");
@@ -277,7 +277,7 @@ TEST_CASE("Graph Forward dims", "[GraphView]") {
     }
 }
 
-TEST_CASE("[aidge/_CORE/graph] GraphView(replaceWith)") {
+TEST_CASE("[core/graph] GraphView(replaceWith)") {
     SECTION("replace small pattern") {
         // create original graph
         std::shared_ptr<GraphView> g = std::make_shared<GraphView>("TestGraph");
diff --git a/unit_tests/operator/Test_GenericOperator.cpp b/unit_tests/operator/Test_GenericOperator.cpp
index 44e9b32f0cde49971dba5aa6e857e651a4e945cf..ff41ed468e5b84bf3455c25b327a91730967d3c6 100644
--- a/unit_tests/operator/Test_GenericOperator.cpp
+++ b/unit_tests/operator/Test_GenericOperator.cpp
@@ -17,7 +17,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[aidge/_CORE/operators] GenericOp(add & get parameters)", "[Operator]") {
+TEST_CASE("[core/operators] GenericOp(add & get parameters)", "[Operator]") {
     SECTION("INT") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         int value = 5;
@@ -66,7 +66,7 @@ TEST_CASE("[aidge/_CORE/operators] GenericOp(add & get parameters)", "[Operator]
     }
 }
 
-TEST_CASE("[aidge/_CORE/operator] GenericOp(type check)", "[.ass]") {
+TEST_CASE("[core/operator] GenericOp(type check)", "[.ass]") {
     SECTION("WRONG TYPE FOR GETTER") {
         GenericOperator_Op Testop("TestOp", 1, 1, 1);
         Testop.addParameter<long>("longParam", 3);