diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 0f3132c118f1e67bac530a6dfb74d0dbacf03eb1..f8d17f984aa3a3b8359f84560a93aa100a625c7a 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -19,6 +19,7 @@
 
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/future_std/any.hpp"
 
 namespace Aidge
 {
@@ -47,6 +48,7 @@ public:
         assert(
             detail::isAreaValid(i_FirstDataCoordinates, i_Dimensions)
             && "Tensors requested area is invalid");
+        mStorage.reset();
         mScalarSize = detail::sizeOf(mDataType);
         computeLayout();
     };
@@ -68,10 +70,14 @@ public:
     /// @param src pointer to the raw host buffer from which the data will be copied
     /// @param length Nb of element to copy from the buffer
     virtual void copyFromHost(Byte_t const *const src, NbElts_t length) = 0;
-    /// @deprecated see API V2
-    /// @todo This function returns the address of a data allocated on the spot if it
-    /// does not exists, which is undocumented. Is it a valid design. reconsider.
-    virtual Byte_t *rawPtr() = 0;
+    /// @brief Returns an object that can be used to retrieve the storage on current
+    /// Tensor backend. If no storage has been allocated yet, it tries to allocate one
+    /// before returning.
+    /// @note Returned pointer, when dereferenced, must be cast to the native type of
+    /// the storage in order to be usable.
+    /// @return A pointer to an "any" object that identifies the storage.<br>
+    /// nullptr if the request failed.
+    virtual future_std::any *queryStorage() = 0;
     /// @brief Size of one scalar (in bytes)
     inline std::size_t scalarSize() const noexcept
     {
@@ -96,9 +102,9 @@ public:
     /// the actual data pointer type, all the data can be traversed by simple
     /// pointer arithmetic.
     /// @return Address of the very first data in memory (lexicographic order)
-    inline Byte_t const *getDataAddress() const noexcept
+    inline future_std::any const *getStorage() const noexcept
     {
-        return mStorage;
+        return &mStorage;
     };
 
     /// @brief Getting the address of the very first data in memory (lexicographic
@@ -107,9 +113,9 @@ public:
     /// the actual data pointer type, all the data can be traversed by simple
     /// pointer arithmetic.
     /// @return Address of the very first data in memory (lexicographic order)
-    inline Byte_t *getDataAddress() noexcept
+    inline future_std::any *getStorage() noexcept
     {
-        return mStorage;
+        return &mStorage;
     };
 
     /// @brief Returns a vector of the tensor in-memory dimensions.
@@ -257,14 +263,12 @@ private:
     /// mvLayout[i] == mvLayout[i+1]*mDimensions[i+1];<br>
     /// Besides mvLayout[0]*mDimensions[0] == mNbElts*mvLayout[D-1].
     std::vector<std::size_t> mvLayout;
-    /// @brief Address of the very first data in memory (lexicographic order)
-    /// @details Points to a contiguous in-memory array such that, when cast to
-    /// the actual data pointer type, all the data can be traversed by simple
-    /// pointer arithmetic.
-    /// @deprecated Such storage is meaningless for most backend except CPU.
-    /// @todo If needed, replace by a _any storing a backend-dependant handle on
-    /// storage.
-    Byte_t *mStorage = nullptr;
+    /// @brief Identifier of the TensorImpl storage
+    /// @details As storage identification is backend-dependent, it is stored as an "any"
+    /// object that must be cast back to its native type to be usable.<br>
+    /// For instance, on CPU, the identification object is merely a pointer to the first
+    /// data byte.
+    future_std::any mStorage;
 
 protected:
     /// @brief Number of element setter for inherited classes
@@ -281,13 +285,19 @@ protected:
     {
         mScalarSize = s;
     }
-    /// @brief Data address setter for inherited classes
-    /// @param ptr Address of the first data in the tensor
-    /// @todo Make it template to input ptr type and reinterpret_cast inside?
+    /// @brief Setter of the TensorImpl storage identifier, for inherited classes
+    /// @param i_Storage Identifier of the TensorImpl storage
+    /// @note Should be used only during (re)initialization of storage.
+    inline void setStorageIdentifier(future_std::any &&i_Storage) noexcept
+    {
+        mStorage = i_Storage;
+    }
+    /// @brief Setter of the TensorImpl storage identifier, for inherited classes
+    /// @param i_Storage Identifier of the TensorImpl storage
     /// @note Should be used only during (re)initialization of storage.
-    inline void setDataAddress(unsigned char *ptr) noexcept
+    inline void setStorageIdentifier(future_std::any const &i_Storage) noexcept
     {
-        mStorage = ptr;
+        mStorage = i_Storage;
     }
     /// @brief storing tensor dimensions from lvalue dimensions
     /// @param iDims dimensions to store
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 13af8656d83031175f0dc7f47efb8eddcfa31cfb..ad591463357c5f77b06a128b7267395029b094ad 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -24,6 +24,7 @@
 #include "aidge/utils/ArrayHelpers.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/future_std/any.hpp"
 
 namespace Aidge
 {
@@ -419,9 +420,7 @@ public:
      */
     bool hasData() const
     {
-        // optional<_any> GetMemoryLocation()
-        // check the optional or bool GetMemoryLocation(_any&), check return
-        return (mImpl) ? (getDataAddress() == nullptr ? false : true) : false;
+        return getStorage()->has_value();
     }
 
     /**
@@ -529,7 +528,7 @@ public:
         ///@todo : add assert expected Type compatible with datatype
         ///@todo : add assert idx < Size
         return *reinterpret_cast<expectedType *>(
-            getDataAddress() + idx * getScalarSize());
+            future_std::any_cast<Byte_t *>(*getStorage()) + idx * getScalarSize());
     }
 
     template<typename expectedType>
@@ -546,9 +545,11 @@ public:
     /// @todo redesign
     template<typename expectedType> void set(NbElts_t const idx, expectedType const value)
     {
-        ///@todo : add assert expected Type compatible with datatype
-        ///@todo : add assert idx < Size
-        unsigned char *dataPtr = getDataAddress() + idx * getScalarSize();
+        ///@todo add assert expected Type compatible with datatype
+        ///@todo add assert idx < Size
+        ///@todo redesign to work with any backend, this one works only for CPU storage
+        Byte_t *dataPtr
+            = future_std::any_cast<Byte_t *>(*getStorage()) + idx * getScalarSize();
         ///@bug only valid for trivially copyable data
         std::memcpy(dataPtr, &value, getScalarSize());
     }
@@ -559,6 +560,9 @@ public:
         set<expectedType>(getIdx(coordIdx), value);
     }
 
+    /// @brief Serializes tensor content
+    /// @return Tensor as pretty string
+    /// @todo FIXME implementation only valid for CPU backend
     std::string toString() const;
 
     inline void print() const
@@ -585,21 +589,19 @@ public:
     NbElts_t getIdx(std::vector<Coord_t> const &coordIdx) const noexcept;
 
 private:
-    /// @brief Getting the address of the very first data in memory (lexicographic
-    /// order), read only access to data
-    /// @details Points to a contiguous in-memory array such that, when cast to
-    /// the actual data pointer type, all the data can be traversed by simple
-    /// pointer arithmetic.
-    /// @return Address of the very first data in memory (lexicographic order)
-    Byte_t const *getDataAddress() const noexcept;
-
-    /// @brief Getting the address of the very first data in memory (lexicographic
-    /// order), read-write access to data
-    /// @details Points to a contiguous in-memory array such that, when cast to
-    /// the actual data pointer type, all the data can be traversed by simple
-    /// pointer arithmetic.
-    /// @return Address of the very first data in memory (lexicographic order)
-    Byte_t *getDataAddress() noexcept;
+    /// @brief Getting a pointer to a read-only storage object
+    /// @details The returned reference object must be cast to its native type to be
+    /// usable.
+    /// @sa TensorImpl::mStorage
+    /// @return read-only address of the TensorImpl storage object
+    future_std::any const *getStorage() const noexcept;
+
+    /// @brief Getting a pointer to a read-write storage object
+    /// @details The returned reference object must be cast to its native type to be
+    /// usable.
+    /// @sa TensorImpl::mStorage
+    /// @return read-write address of the TensorImpl storage object
+    future_std::any *getStorage() noexcept;
 
     /// @brief Getting the size of the stored data type in bytes.
     /// @return Size of the stored data type in bytes.
diff --git a/include/aidge/hook/OutputRange.hpp b/include/aidge/hook/OutputRange.hpp
index d127f22f83a14d84d143781ba8b85e4da70a9e36..685d28afe571b9732e48639633458676776c8d55 100644
--- a/include/aidge/hook/OutputRange.hpp
+++ b/include/aidge/hook/OutputRange.hpp
@@ -44,7 +44,8 @@ public:
         // tensor->print();
         // std::cout << "call() outputRange hook : tensor printed" << std::endl;
         float max_value = 0.;
-        float *casted_tensor = static_cast<float *>(tensor->getImpl().rawPtr());
+        float *casted_tensor = static_cast<float *>(
+            future_std::any_cast<Byte_t *>(*tensor->getImpl().queryStorage()));
         // find the absolute max value in the tensor, save it to registered outputs
         for (std::size_t i = 0; i < tensor->size(); ++i)
         {
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index ed9db85e9f991c8607974a4a9a9c17652a70b478..b7a14b19333d8397b6973198523e7a5639e7d123 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -193,9 +193,11 @@ void init_Tensor(py::module& m)
                     throw py::value_error("Unsupported data format");
             }
 
+            /// @todo FIXME only valid for CPU tensor
             return py::buffer_info(
-                const_cast<void*>(reinterpret_cast<void const*>(
-                    tensorImpl.getDataAddress())), /* Pointer to buffer */
+                const_cast<void*>(
+                    reinterpret_cast<void const*>(future_std::any_cast<Byte_t*>(
+                        *tensorImpl.getStorage()))), /* Pointer to buffer */
                 tensorImpl.scalarSize(), /* Size of one scalar */
                 dataFormatDescriptor, /* Python struct-style format descriptor */
                 b.nbDims(), /* Number of dimensions */
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index d3860f54bbc67005279e6bcde1b9b4b539f9dd1c..eb64d0611670911b809e4cc0318c44140eb2025d 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -9,45 +9,58 @@
  *
  ********************************************************************************/
 
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
-#include <memory>
-#include <string>
+#include "aidge/data/Data.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
-#include "aidge/data/Data.hpp"
+#include <memory>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
 
 namespace py = pybind11;
-namespace Aidge {
-void init_GraphView(py::module& m) {
+namespace Aidge
+{
+void init_GraphView(py::module& m)
+{
     py::class_<GraphView, std::shared_ptr<GraphView>>(m, "GraphView")
-          .def(py::init<>())
-          .def("save", &GraphView::save, py::arg("path"), py::arg("verbose") = false,
-          R"mydelimiter(
+        .def(py::init<>())
+        .def(
+            "save",
+            &GraphView::save,
+            py::arg("path"),
+            py::arg("verbose") = false,
+            R"mydelimiter(
           Save the GraphView as a Mermaid graph in a .md file at the specified location.
           
           :param path: save location
           :type path: str
           )mydelimiter")
 
-          .def("get_output_nodes", &GraphView::outputNodes,
-          R"mydelimiter(
+        .def(
+            "get_output_nodes",
+            &GraphView::outputNodes,
+            R"mydelimiter(
           Get set of output Nodes.
           
           :rtype: list[Node]
           )mydelimiter")
 
-          .def("get_input_nodes", &GraphView::inputNodes,
-          R"mydelimiter(
+        .def(
+            "get_input_nodes",
+            &GraphView::inputNodes,
+            R"mydelimiter(
           Get set of input Nodes.
           
           :rtype: list[Node]
           )mydelimiter")
 
-          .def("add", (void (GraphView::*)(std::shared_ptr<Node>, bool)) & GraphView::add,
-               py::arg("other_node"), py::arg("include_learnable_parameters") = true,
-          R"mydelimiter(
+        .def(
+            "add",
+            (void(GraphView::*)(std::shared_ptr<Node>, bool)) & GraphView::add,
+            py::arg("other_node"),
+            py::arg("include_learnable_parameters") = true,
+            R"mydelimiter(
           Include a Node to the current GraphView object.
           
           :param other_node: Node to add
@@ -56,15 +69,16 @@ void init_GraphView(py::module& m) {
           :type includeLearnableParameter: bool
           )mydelimiter")
 
-          .def("add_child",
-               (void (GraphView::*)(std::shared_ptr<Node>,
-                                   std::shared_ptr<Node>,
-                                   const IOIndex_t,
-                                   IOIndex_t)) &
-                    GraphView::addChild,
-               py::arg("toOtherNode"), py::arg("fromOutNode") = nullptr,
-               py::arg("fromTensor") = 0U, py::arg("toTensor") = gk_IODefaultIndex,
-          R"mydelimiter(
+        .def(
+            "add_child",
+            (void(GraphView::*)(
+                std::shared_ptr<Node>, std::shared_ptr<Node>, const IOIndex_t, IOIndex_t))
+                & GraphView::addChild,
+            py::arg("toOtherNode"),
+            py::arg("fromOutNode") = nullptr,
+            py::arg("fromTensor") = 0U,
+            py::arg("toTensor") = gk_IODefaultIndex,
+            R"mydelimiter(
           Include a Node to the current GraphView object.
           
           :param other_node: Node to add
@@ -72,9 +86,12 @@ void init_GraphView(py::module& m) {
           :param includeLearnableParameter: include non-data inputs, like weights and biases. Default True.
           :type includeLearnableParameter
           )mydelimiter")
-          
-          .def("replace_with", &GraphView::replaceWith, py::arg("new_nodes"),
-          R"mydelimiter(
+
+        .def(
+            "replace_with",
+            &GraphView::replaceWith,
+            py::arg("new_nodes"),
+            R"mydelimiter(
           Replace the current GraphView with the set of given Nodes if possible.
           
           :param new_nodes: Nodes with connections already taken care of.
@@ -83,24 +100,26 @@ void init_GraphView(py::module& m) {
           :rtype: bool
           )mydelimiter")
 
-          .def("get_nodes", &GraphView::getNodes)
-          .def("get_node", &GraphView::getNode, py::arg("node_name"))
-          .def("forward_dims", &GraphView::forwardDims)
-          .def("__call__", &GraphView::operator(), py::arg("connectors"))
-          .def("set_datatype", &GraphView::setDatatype, py::arg("datatype"))
-          .def("set_backend", &GraphView::setBackend, py::arg("backend"))
-          //   .def("__getitem__", [](Tensor& b, size_t idx)-> py::object {
-          //      // TODO : Should return error if backend not compatible with get
-          //      if (idx >= b.size()) throw py::index_error();
-          //      switch(b.dataType()){
-          //           case DataType::Float32:
-          //                return py::cast(static_cast<float*>(b.getImpl().rawPtr())[idx]);
-          //           case DataType::Int32:
-          //                return py::cast(static_cast<int*>(b.getImpl().rawPtr())[idx]);
-          //           default:
-          //                return py::none();
-          //           }
-          //      })
-            ;
+        .def("get_nodes", &GraphView::getNodes)
+        .def("get_node", &GraphView::getNode, py::arg("node_name"))
+        .def("forward_dims", &GraphView::forwardDims)
+        .def("__call__", &GraphView::operator(), py::arg("connectors"))
+        .def("set_datatype", &GraphView::setDatatype, py::arg("datatype"))
+        .def("set_backend", &GraphView::setBackend, py::arg("backend"))
+        //   .def("__getitem__", [](Tensor& b, size_t idx)-> py::object {
+        //      // TODO : Should return error if backend not compatible with get
+        //      if (idx >= b.size()) throw py::index_error();
+        //      switch(b.dataType()){
+        //           case DataType::Float32:
+        //                return py::cast(static_cast<float*>(future_std::any_cast<Byte_t
+        //                *>(*b.getImpl().queryStorage())[idx]));
+        //           case DataType::Int32:
+        //                return py::cast(static_cast<int*>(future_std::any_cast<Byte_t
+        //                *>(*b.getImpl().queryStorage())[idx]));
+        //           default:
+        //                return py::none();
+        //           }
+        //      })
+        ;
 }
-}  // namespace Aidge
\ No newline at end of file
+} // namespace Aidge
\ No newline at end of file
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 28f90e7002f74df662f467c3a7713d700070bb67..f22789e9a3063fdcfce1d7a0cdb09f03635baac4 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -96,7 +96,8 @@ void Tensor::setBackend(const std::string &name)
                 /// @todo FIXME
                 assert(false && "So far copy between different backend is not supported");
                 // _any getMemoryLocation();
-                Byte_t const *data = getDataAddress();
+                // newImpl->copy(*getStorage());
+                Byte_t const *data = future_std::any_cast<Byte_t *>(*getStorage());
                 newImpl->copyFromHost(
                     data, size()); // /!\ it does not cast data but reinterpret them
             }
@@ -129,16 +130,16 @@ NbElts_t Tensor::getIdx(std::vector<Coord_t> const &coordIdx) const noexcept
 
 /// @brief Getting the address of the very first data in memory (lexicographic
 /// order), read only access to data
-Byte_t const *Tensor::getDataAddress() const noexcept
+future_std::any const *Tensor::getStorage() const noexcept
 {
-    return mImpl->getDataAddress();
+    return mImpl->getStorage();
 }
 
 /// @brief Getting the address of the very first data in memory (lexicographic
 /// order), read-write access to data
-Byte_t *Tensor::getDataAddress() noexcept
+future_std::any *Tensor::getStorage() noexcept
 {
-    return mImpl->getDataAddress();
+    return mImpl->getStorage();
 }
 
 /// @brief Getting the size of the stored data type in bytes.
@@ -181,7 +182,8 @@ void Tensor::setDatatype(const DataType dt)
                 && "So far copy between different data types is not properly "
                    "implemented");
             // _any getMemoryLocation();
-            Byte_t const *data = getDataAddress();
+            // newImpl->copy(*getStorage());
+            Byte_t const *data = future_std::any_cast<Byte_t *>(*getStorage());
             newImpl->copyFromHost(
                 data, size()); // /!\ it does not cast data but reinterpret them
         }
@@ -317,7 +319,8 @@ std::string Tensor::toString() const
                                        + std::to_string(
                                            reinterpret_cast<
                                                detail::CppType_t<DataType::Int32> *>(
-                                               mImpl->rawPtr())[counter++])
+                                               future_std::any_cast<Byte_t *>(
+                                                   *mImpl->queryStorage()))[counter++])
                                        + ",";
                                 break;
                             case DataType::Float64:
@@ -325,7 +328,8 @@ std::string Tensor::toString() const
                                        + std::to_string(
                                            reinterpret_cast<
                                                detail::CppType_t<DataType::Float64> *>(
-                                               mImpl->rawPtr())[counter++])
+                                               future_std::any_cast<Byte_t *>(
+                                                   *mImpl->queryStorage()))[counter++])
                                        + ",";
                                 break;
                             default:
@@ -333,7 +337,8 @@ std::string Tensor::toString() const
                                        + std::to_string(
                                            reinterpret_cast<
                                                detail::CppType_t<DataType::Float32> *>(
-                                               mImpl->rawPtr())[counter++])
+                                               future_std::any_cast<Byte_t *>(
+                                                   *mImpl->queryStorage()))[counter++])
                                        + ",";
                                 break;
                         }
@@ -344,7 +349,8 @@ std::string Tensor::toString() const
                             res += " "
                                    + std::to_string(reinterpret_cast<
                                                     detail::CppType_t<DataType::Int32> *>(
-                                       mImpl->rawPtr())[counter++])
+                                       future_std::any_cast<Byte_t *>(
+                                           *mImpl->queryStorage()))[counter++])
                                    + "}";
                             break;
                         case DataType::Float64:
@@ -352,7 +358,8 @@ std::string Tensor::toString() const
                                    + std::to_string(
                                        reinterpret_cast<
                                            detail::CppType_t<DataType::Float64> *>(
-                                           mImpl->rawPtr())[counter++])
+                                           future_std::any_cast<Byte_t *>(
+                                               *mImpl->queryStorage()))[counter++])
                                    + "}";
                             break;
                         default:
@@ -360,7 +367,8 @@ std::string Tensor::toString() const
                                    + std::to_string(
                                        reinterpret_cast<
                                            detail::CppType_t<DataType::Float32> *>(
-                                           mImpl->rawPtr())[counter++])
+                                           future_std::any_cast<Byte_t *>(
+                                               *mImpl->queryStorage()))[counter++])
                                    + "}";
                             break;
                     }
@@ -396,21 +404,24 @@ std::string Tensor::toString() const
                     res += " "
                            + std::to_string(
                                reinterpret_cast<detail::CppType_t<DataType::Int32> *>(
-                                   mImpl->rawPtr())[j])
+                                   future_std::any_cast<Byte_t *>(
+                                       *mImpl->queryStorage()))[j])
                            + ((j < dims()[0] - 1) ? "," : "");
                     break;
                 case DataType::Float64:
                     res += " "
                            + std::to_string(
                                reinterpret_cast<detail::CppType_t<DataType::Float64> *>(
-                                   mImpl->rawPtr())[j])
+                                   future_std::any_cast<Byte_t *>(
+                                       *mImpl->queryStorage()))[j])
                            + ((j < dims()[0] - 1) ? "," : "");
                     break;
                 default:
                     res += " "
                            + std::to_string(
                                reinterpret_cast<detail::CppType_t<DataType::Float32> *>(
-                                   mImpl->rawPtr())[j])
+                                   future_std::any_cast<Byte_t *>(
+                                       *mImpl->queryStorage()))[j])
                            + ((j < dims()[0] - 1) ? "," : "");
                     break;
             }
diff --git a/unit_tests/CMakeLists.txt b/unit_tests/CMakeLists.txt
index 69f1269f2e06a142d9b73315cca3fb98fc9be5d8..6c6e11f5f1f7306df36125e16c10411643d8b0d4 100644
--- a/unit_tests/CMakeLists.txt
+++ b/unit_tests/CMakeLists.txt
@@ -15,19 +15,20 @@ file(GLOB_RECURSE src_files "*.cpp")
 # list(APPEND src_files "graph/Test_Connector.cpp")
 # list(APPEND src_files "graph/Test_GraphView.cpp")
 # #MESSAGE(WARNING "Memory leak in graphMatching/Test_GRegex.cpp")
-# list(APPEND src_files "graphMatching/Test_GRegex.cpp")
+# # list(APPEND src_files "graphMatching/Test_GRegex.cpp")
 # list(APPEND src_files "graphMatching/Test_NodeRegex.cpp")
 # #MESSAGE(WARNING "Memory leak in graphMatching/Test_SeqStm.cpp")
 # list(APPEND src_files "graphMatching/Test_SeqStm.cpp")
 # #MESSAGE(WARNING "Memory leak in graphMatching/Test_StmFactory.cpp")
-# list(APPEND src_files "graphMatching/Test_StmFactory.cpp")
+# # list(APPEND src_files "graphMatching/Test_StmFactory.cpp")
 # list(APPEND src_files "operator/Test_GenericOperator.cpp")
 
 add_executable(tests${module_name} ${src_files})
 
 target_compile_features(tests${module_name} PRIVATE cxx_std_14)
 
-set(FORCE_CI TRUE)
+# set(FORCE_CI TRUE)
+set(FORCE_CI FALSE)
 if (NOT(FORCE_CI))
 
 if (DOSANITIZE STREQUAL "ON")
diff --git a/unit_tests/graphRegex/Test_Fsm.cpp b/unit_tests/graphRegex/Test_Fsm.cpp
index e5950f21b323f07b380ae95f70637ca48a173481..cfb01ddccd61ebae9c82ff2980a3cc01f179da4e 100644
--- a/unit_tests/graphRegex/Test_Fsm.cpp
+++ b/unit_tests/graphRegex/Test_Fsm.cpp
@@ -4,191 +4,209 @@
 
 #include "aidge/nodeTester/ConditionalInterpreter.hpp"
 
-#include "aidge/graphRegex/matchFsm/FsmNode.hpp"
 #include "aidge/graphRegex/matchFsm/FsmEdge.hpp"
 #include "aidge/graphRegex/matchFsm/FsmGraph.hpp"
+#include "aidge/graphRegex/matchFsm/FsmNode.hpp"
 #include "aidge/graphRegex/matchFsm/FsmRunTimeContext.hpp"
 
 using namespace Aidge;
 
-TEST_CASE("matchFSM", "FsmEdge") {
-
-   
-        std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
-        std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("true==true");
-        FsmEdgeUnique EdgeToTest(nodeA,nodeB,toTest);
-
-    SECTION("FsmEdgeUnique constructor") {
-        REQUIRE(EdgeToTest.getSourceNode() == nodeA);
-        REQUIRE(EdgeToTest.getDestNode() == nodeB);
-        REQUIRE(EdgeToTest.isCommon() == false);
+TEST_CASE("matchFSM", "FsmEdge")
+{
+    std::shared_ptr<FsmNode> node0 = std::make_shared<FsmNode>(true, false);
+    std::shared_ptr<FsmNode> node1 = std::make_shared<FsmNode>(false, true);
+    std::shared_ptr<ConditionalInterpreter> toTest0
+        = std::make_shared<ConditionalInterpreter>("true==true");
+    FsmEdgeUnique EdgeToTest0(node0, node1, toTest0);
+
+    SECTION("FsmEdgeUnique constructor")
+    {
+        REQUIRE(EdgeToTest0.getSourceNode() == node0);
+        REQUIRE(EdgeToTest0.getDestNode() == node1);
+        REQUIRE(EdgeToTest0.isCommon() == false);
     }
-    
-    SECTION("FsmEdgeCommon constructor") {
-        std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
-        std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("true==true");
 
-        FsmEdgeCommon EdgeToTest(nodeA,nodeB,toTest,"A");
+    SECTION("FsmEdgeCommon constructor")
+    {
+        std::shared_ptr<FsmNode> nodeA = std::make_shared<FsmNode>(true, false);
+        std::shared_ptr<FsmNode> nodeB = std::make_shared<FsmNode>(false, true);
+        std::shared_ptr<ConditionalInterpreter> toTest
+            = std::make_shared<ConditionalInterpreter>("true==true");
+
+        FsmEdgeCommon EdgeToTest(nodeA, nodeB, toTest, "A");
 
         REQUIRE(EdgeToTest.getSourceNode() == nodeA);
         REQUIRE(EdgeToTest.getDestNode() == nodeB);
         REQUIRE(EdgeToTest.isCommon() == true);
     }
 
-    SECTION("FsmEdgeRef constructor") {
-        std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
-        std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("true==true");
+    SECTION("FsmEdgeRef constructor")
+    {
+        std::shared_ptr<FsmNode> nodeA = std::make_shared<FsmNode>(true, false);
+        std::shared_ptr<FsmNode> nodeB = std::make_shared<FsmNode>(false, true);
+        std::shared_ptr<ConditionalInterpreter> toTest
+            = std::make_shared<ConditionalInterpreter>("true==true");
 
-        FsmEdgeRef EdgeToTest(nodeA,nodeB,0,-1);
+        FsmEdgeRef EdgeToTest(nodeA, nodeB, 0, -1);
 
         REQUIRE(EdgeToTest.getSourceNode() == nodeA);
         REQUIRE(EdgeToTest.getDestNode() == nodeB);
         REQUIRE(EdgeToTest.isCommon() == false);
     }
-          
-    SECTION("FsmEdgeEmpty constructor") {
-        std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
-        std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("true==true");
 
-        FsmEdgeEmpty EdgeToTest(nodeA,nodeB);
+    SECTION("FsmEdgeEmpty constructor")
+    {
+        std::shared_ptr<FsmNode> nodeA = std::make_shared<FsmNode>(true, false);
+        std::shared_ptr<FsmNode> nodeB = std::make_shared<FsmNode>(false, true);
+        std::shared_ptr<ConditionalInterpreter> toTest
+            = std::make_shared<ConditionalInterpreter>("true==true");
+
+        FsmEdgeEmpty EdgeToTest(nodeA, nodeB);
 
         REQUIRE(EdgeToTest.getSourceNode() == nodeA);
         REQUIRE(EdgeToTest.getDestNode() == nodeB);
         REQUIRE(EdgeToTest.isCommon() == false);
     }
 
-
-    SECTION("FsmEdgeFactory"){
-
-    std::map<std::string, std::shared_ptr<ConditionalInterpreter>> allTest = {
-        {"A",std::make_shared<ConditionalInterpreter>("true==true")},
-        {"B",std::make_shared<ConditionalInterpreter>("true==true")},
-        {"C",std::make_shared<ConditionalInterpreter>("true==true")}
-    };
-
-// make(std::shared_ptr<FsmNode> source, std::shared_ptr<FsmNode> dest, 
-//     FsmEdgeTypes type,std::map<std::string, const std::shared_ptr<ConditionalInterpreter>> allTest,
-//     const std::string& lexeme = "");
-
-        std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(true,false);
-//     EMPTY = 0,
-//     REF,
-//     COMMON,
-//     UNIQUE
-
-        std::shared_ptr<FsmEdge> edgeE = FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::EMPTY,allTest,"");
-        std::shared_ptr<FsmEdge> edgeU = FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::UNIQUE,allTest,"A");
-        std::shared_ptr<FsmEdge> edgeC = FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::COMMON,allTest,"A#");
-        std::shared_ptr<FsmEdge> edgeR = FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::REF,allTest,"(0,1)");
-
-        //test detection of bad syntax lexem
-        REQUIRE_THROWS(FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::EMPTY,allTest,"A"));
-        REQUIRE_THROWS(FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::UNIQUE,allTest,"A#"));
-        REQUIRE_THROWS(FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::COMMON,allTest,"A"));
-        REQUIRE_THROWS(FsmEdgeFactory::make(nodeA,nodeB,FsmEdgeTypes::REF,allTest,"A"));
+    SECTION("FsmEdgeFactory")
+    {
+        std::map<std::string, std::shared_ptr<ConditionalInterpreter>> allTest
+            = {{"A", std::make_shared<ConditionalInterpreter>("true==true")},
+               {"B", std::make_shared<ConditionalInterpreter>("true==true")},
+               {"C", std::make_shared<ConditionalInterpreter>("true==true")}};
+
+        // make(std::shared_ptr<FsmNode> source, std::shared_ptr<FsmNode> dest,
+        //     FsmEdgeTypes type,std::map<std::string, const
+        //     std::shared_ptr<ConditionalInterpreter>> allTest, const std::string& lexeme
+        //     = "");
+
+        std::shared_ptr<FsmNode> nodeA = std::make_shared<FsmNode>(false, true);
+        std::shared_ptr<FsmNode> nodeB = std::make_shared<FsmNode>(true, false);
+        //     EMPTY = 0,
+        //     REF,
+        //     COMMON,
+        //     UNIQUE
+
+        std::shared_ptr<FsmEdge> edgeE
+            = FsmEdgeFactory::make(nodeA, nodeB, FsmEdgeTypes::EMPTY, allTest, "");
+        std::shared_ptr<FsmEdge> edgeU
+            = FsmEdgeFactory::make(nodeA, nodeB, FsmEdgeTypes::UNIQUE, allTest, "A");
+        std::shared_ptr<FsmEdge> edgeC
+            = FsmEdgeFactory::make(nodeA, nodeB, FsmEdgeTypes::COMMON, allTest, "A#");
+        std::shared_ptr<FsmEdge> edgeR
+            = FsmEdgeFactory::make(nodeA, nodeB, FsmEdgeTypes::REF, allTest, "(0,1)");
+
+        // test detection of a bad-syntax lexeme
+        REQUIRE_THROWS(
+            FsmEdgeFactory::make(nodeA, nodeB, FsmEdgeTypes::EMPTY, allTest, "A"));
+        REQUIRE_THROWS(
+            FsmEdgeFactory::make(nodeA, nodeB, FsmEdgeTypes::UNIQUE, allTest, "A#"));
+        REQUIRE_THROWS(
+            FsmEdgeFactory::make(nodeA, nodeB, FsmEdgeTypes::COMMON, allTest, "A"));
+        REQUIRE_THROWS(
+            FsmEdgeFactory::make(nodeA, nodeB, FsmEdgeTypes::REF, allTest, "A"));
 
         REQUIRE(edgeE->getSourceNode() == nodeA);
         REQUIRE(edgeE->getDestNode() == nodeB);
     }
 
-    SECTION("graph constructor") {
-        //make the nodes 
-        std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
-        std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,false);
-        std::shared_ptr<FsmNode>  nodeC = std::make_shared<FsmNode>(false,true);
+    SECTION("graph constructor")
+    {
+        // make the nodes
+        std::shared_ptr<FsmNode> nodeA = std::make_shared<FsmNode>(true, false);
+        std::shared_ptr<FsmNode> nodeB = std::make_shared<FsmNode>(false, false);
+        std::shared_ptr<FsmNode> nodeC = std::make_shared<FsmNode>(false, true);
 
-        //make the edges
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("true==true");
-        std::shared_ptr<FsmEdge> edgeAB =  std::make_shared<FsmEdgeUnique>(nodeA,nodeB,toTest);
-        std::shared_ptr<FsmEdge> edgeBC =  std::make_shared<FsmEdgeUnique>(nodeB,nodeC,toTest);
- 
-        std::shared_ptr<FsmGraph> graph =  std::make_shared<FsmGraph>();
+        // make the edges
+        std::shared_ptr<ConditionalInterpreter> toTest
+            = std::make_shared<ConditionalInterpreter>("true==true");
+        std::shared_ptr<FsmEdge> edgeAB
+            = std::make_shared<FsmEdgeUnique>(nodeA, nodeB, toTest);
+        std::shared_ptr<FsmEdge> edgeBC
+            = std::make_shared<FsmEdgeUnique>(nodeB, nodeC, toTest);
+
+        std::shared_ptr<FsmGraph> graph = std::make_shared<FsmGraph>();
 
         graph->addEdge(edgeAB);
         graph->addEdge(edgeBC);
-        
 
         REQUIRE(graph->getValidNodes() == std::set<std::shared_ptr<FsmNode>>{nodeA});
         REQUIRE(graph->getStartNodes() == std::vector<std::shared_ptr<FsmNode>>{nodeC});
     }
 
+    SECTION("graph merge")
+    {
+        std::shared_ptr<ConditionalInterpreter> toTest
+            = std::make_shared<ConditionalInterpreter>("true==true");
 
-    SECTION("graph merge") {
+        // make the nodes
+        std::shared_ptr<FsmNode> nodeA = std::make_shared<FsmNode>(false, true);
+        std::shared_ptr<FsmNode> nodeB = std::make_shared<FsmNode>(false, false);
+        std::shared_ptr<FsmNode> nodeC = std::make_shared<FsmNode>(true, false);
 
-        std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("true==true");
+        // make the edges
 
-        //make the nodes 
-        std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,false);
-        std::shared_ptr<FsmNode>  nodeC = std::make_shared<FsmNode>(true,false);
+        std::shared_ptr<FsmEdge> edgeAB
+            = std::make_shared<FsmEdgeUnique>(nodeA, nodeB, toTest);
+        std::shared_ptr<FsmEdge> edgeBC
+            = std::make_shared<FsmEdgeUnique>(nodeB, nodeC, toTest);
 
-        //make the edges
-        
-        std::shared_ptr<FsmEdge> edgeAB =  std::make_shared<FsmEdgeUnique>(nodeA,nodeB,toTest);
-        std::shared_ptr<FsmEdge> edgeBC =  std::make_shared<FsmEdgeUnique>(nodeB,nodeC,toTest);
- 
-        std::shared_ptr<FsmGraph> graph =  std::make_shared<FsmGraph>();
+        std::shared_ptr<FsmGraph> graph = std::make_shared<FsmGraph>();
         graph->addEdge(edgeAB);
         graph->addEdge(edgeBC);
 
         REQUIRE(graph->getValidNodes() == std::set<std::shared_ptr<FsmNode>>{nodeC});
         REQUIRE(graph->getStartNodes() == std::vector<std::shared_ptr<FsmNode>>{nodeA});
-        REQUIRE(graph->getNodes() == std::set<std::shared_ptr<FsmNode>>{nodeA,nodeB,nodeC});
-
-                //make the nodes 
-        std::shared_ptr<FsmNode>  node2A = std::make_shared<FsmNode>(false,true);
-        std::shared_ptr<FsmNode>  node2B = std::make_shared<FsmNode>(false,false);
-        std::shared_ptr<FsmNode>  node2C = std::make_shared<FsmNode>(true,false);
+        REQUIRE(
+            graph->getNodes() == std::set<std::shared_ptr<FsmNode>>{nodeA, nodeB, nodeC});
 
+        // make the nodes
+        std::shared_ptr<FsmNode> node2A = std::make_shared<FsmNode>(false, true);
+        std::shared_ptr<FsmNode> node2B = std::make_shared<FsmNode>(false, false);
+        std::shared_ptr<FsmNode> node2C = std::make_shared<FsmNode>(true, false);
 
-        std::shared_ptr<FsmEdge> edge2AB =  std::make_shared<FsmEdgeUnique>(node2A,node2B,toTest);
-        std::shared_ptr<FsmEdge> edge2BC =  std::make_shared<FsmEdgeUnique>(node2B,node2C,toTest);
- 
-        std::shared_ptr<FsmGraph> graph2 =  std::make_shared<FsmGraph>();
+        std::shared_ptr<FsmEdge> edge2AB
+            = std::make_shared<FsmEdgeUnique>(node2A, node2B, toTest);
+        std::shared_ptr<FsmEdge> edge2BC
+            = std::make_shared<FsmEdgeUnique>(node2B, node2C, toTest);
 
+        std::shared_ptr<FsmGraph> graph2 = std::make_shared<FsmGraph>();
 
         graph2->addEdge(edge2AB);
         graph2->addEdge(edge2BC);
 
-        
         REQUIRE(graph2->getValidNodes() == std::set<std::shared_ptr<FsmNode>>{node2C});
         REQUIRE(graph2->getStartNodes() == std::vector<std::shared_ptr<FsmNode>>{node2A});
-        REQUIRE(graph2->getNodes() == std::set<std::shared_ptr<FsmNode>>{node2A,node2B,node2C});
-
+        REQUIRE(
+            graph2->getNodes()
+            == std::set<std::shared_ptr<FsmNode>>{node2A, node2B, node2C});
 
         graph->mergeOneStartOneValid(graph2);
 
         REQUIRE(graph->getValidNodes() == std::set<std::shared_ptr<FsmNode>>{node2C});
         REQUIRE(graph->getStartNodes() == std::vector<std::shared_ptr<FsmNode>>{nodeA});
-        REQUIRE(graph->getNodes() == std::set<std::shared_ptr<FsmNode>>{nodeA,nodeB,nodeC,node2B,node2C});
+        REQUIRE(
+            graph->getNodes()
+            == std::set<std::shared_ptr<FsmNode>>{nodeA, nodeB, nodeC, node2B, node2C});
     }
-
-
-
-
 }
 
 // TEST_CASE("matchFSM", "FsmGraph") {
 
 //     SECTION("FsmEdgeUnique constructor") {
-//         //make the nodes 
+//         //make the nodes
 //         std::shared_ptr<FsmNode>  nodeA = std::make_shared<FsmNode>(true,false);
 //         std::shared_ptr<FsmNode>  nodeB = std::make_shared<FsmNode>(false,true);
 
 //         //make the edges
-//         std::shared_ptr<ConditionalInterpreter> toTest =  std::make_shared<ConditionalInterpreter>("true==true");
-//         std::shared_ptr<FsmEdgeUnique> edge =  std::make_shared<FsmEdgeUnique>(nodeA,nodeB,toTest);
- 
+//         std::shared_ptr<ConditionalInterpreter> toTest =
+//         std::make_shared<ConditionalInterpreter>("true==true");
+//         std::shared_ptr<FsmEdgeUnique> edge =
+//         std::make_shared<FsmEdgeUnique>(nodeA,nodeB,toTest);
+
 //         std::shared_ptr<FsmGraph> graph =  std::make_shared<FsmGraph>();
 
 //         graph->addEdge(edge);
-        
-
 
 //     }