diff --git a/.gitlab/ci/test.gitlab-ci.yml b/.gitlab/ci/test.gitlab-ci.yml
index 1e67ce273abc7d6b02f9e3148264ff3f9ea1cf07..924fd995aff34016cd4fa792a550d3d06db0449c 100644
--- a/.gitlab/ci/test.gitlab-ci.yml
+++ b/.gitlab/ci/test.gitlab-ci.yml
@@ -37,6 +37,7 @@ test:windows_cpp:
     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
     # Install dependencies
     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+    - choco install python -Y
     # Update PATH
     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
   script:
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 718eddeaf6a5d08c9dab4898f5a57c0192dcb80b..f11136adaaa3d23fa9d3dc5749dd5d6771cbc42c 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -208,7 +208,7 @@ public:
      * @brief Get the Nodes pointed to by the GraphView object.
      * @return std::set<NodePtr>
      */
-    inline std::set<NodePtr> getNodes() const { return mNodes; }
+    inline const std::set<NodePtr>& getNodes() const { return mNodes; }
 
     /**
      * @brief Get the operator with the corresponding name if it is in the
@@ -217,7 +217,7 @@ public:
      * @return NodePtr a new empty node if the requested one
      * was not found.
      */
-    NodePtr getNode(const char *nodeName) const;
+    NodePtr getNode(const std::string& nodeName) const;
 
     /**
      * @brief Remove a Node from the current GraphView scope without affecting its connections.
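
Note: returning mNodes by const reference removes a full copy of the node set on every getNodes() call. A minimal caller-side sketch (view is a placeholder std::shared_ptr<GraphView>, for illustration only):

    const std::set<Aidge::NodePtr>& nodes = view->getNodes(); // binds to the member, no copy
    std::set<Aidge::NodePtr> snapshot = view->getNodes();     // take an explicit copy only when one is needed
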
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index f056505e6e7839266213ac355cc0e1b93ab98f0d..11def52dbab30159e9e882fb19d16f1549aa3887 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -62,7 +62,7 @@ public:
    * @param op Operator giving the Node its number of connections.
    * @param name (optional) name for the Node.
    */
-  Node(std::shared_ptr<Operator> op, const char *name = nullptr);
+  Node(std::shared_ptr<Operator> op, const std::string& name = "");
 
   virtual ~Node() = default;
 
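
Note: the constructor migration keeps unnamed Nodes behaving as before, since the empty-string default replaces the old nullptr-to-empty conversion. A hedged sketch, reusing the GenericOperator_Op that appears later in this diff:

    auto op = std::make_shared<Aidge::GenericOperator_Op>("Dummy", 1, 1, 1);
    auto unnamed = std::make_shared<Aidge::Node>(op);          // name is "" (previously: nullptr mapped to an empty string)
    auto named = std::make_shared<Aidge::Node>(op, "conv1");   // a string literal converts implicitly to std::string
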
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index c96b2c571f412124ccdfb83dde685e111448a222..ff3d1888c3bc70b61a3d4da42908d40de2d1d73e 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -141,7 +141,7 @@ public:
 };
 
 template <std::size_t NUM>
-inline std::shared_ptr<Node> Add(const char* name = nullptr) {
+inline std::shared_ptr<Node> Add(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Add_Op<NUM>>(), name);
 }
 }
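
Note: the factory keeps its arity template parameter; only the name type changes. Illustrative usage:

    auto sum = Aidge::Add<2>("sum"); // two-input Add node named "sum"
    auto anon = Aidge::Add<2>();     // the default name is now "" instead of nullptr
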
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 7bf8740877e635cc2e59418bee1c444c7f3884e8..bf76bd45893b43043b81cd6563c500be27c66b42 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -146,7 +146,7 @@ public:
 
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel_dims,
-                                           const char *name = nullptr,
+                                           const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
     // FIXME: properly handle default w&b initialization in every case
@@ -158,7 +158,7 @@ inline std::shared_ptr<Node> AvgPooling(const std::array<DimSize_t, DIM> &kernel
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> AvgPooling(
     DimSize_t const (&kernel_dims)[DIM],
-    const char *name = nullptr,
+    const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0)) {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by AvgPooling, not supported");
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 07af5fa8416cf726e209cd9e690af345b321fb0e..6861c1359737f3f344f0c7d9b2d12c9ff35b88ad 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -144,7 +144,7 @@ public:
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> BatchNorm(const float epsilon = 1.0e-5F,
                                        const float momentum = 0.1F,
-                                       const char *name = nullptr) {
+                                       const std::string& name = "") {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by BatchNorm, not supported");
     auto batchNorm = std::make_shared<Node>(std::make_shared<BatchNorm_Op<static_cast<DimIdx_t>(DIM)>>(epsilon, momentum), name);
     addProducer(batchNorm, 1, std::array<DimSize_t,0>({}), "scale");
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index d6efba2cec6908ad58b9feea5e53807c7227cc88..1edc94b96763cc163646037a8bd069023511df67 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -166,7 +166,7 @@ template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> Conv(DimSize_t in_channels,
                                   DimSize_t out_channels,
                                   const std::array<DimSize_t, DIM> &kernel_dims,
-                                  const char *name = nullptr,
+                                  const std::string& name = "",
                                   const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                   const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
                                   const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
@@ -184,7 +184,7 @@ inline std::shared_ptr<Node> Conv(
     DimSize_t in_channels,
     DimSize_t out_channels,
     DimSize_t const (&kernel_dims)[DIM],
-    const char *name = nullptr,
+    const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index a3b7fbf3b21a5b3fd9e532e0cc19cebd46e5d022..95a2ff55b70dbed9299fb3dca98fb9b0e700d210 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -165,7 +165,7 @@ class ConvDepthWise_Op : public Operator,
 
 template <std::array<DimSize_t, 1>::size_type DIM>
 inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &kernel_dims,
-                                           const char *name = nullptr,
+                                           const std::string& name = "",
                                            const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
                                            const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
                                            const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
@@ -180,7 +180,7 @@ inline std::shared_ptr<Node> ConvDepthWise(const std::array<DimSize_t, DIM> &ker
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> ConvDepthWise(
     DimSize_t const (&kernel_dims)[DIM],
-    const char *name = nullptr,
+    const std::string& name = "",
     const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1),
     const std::array<DimSize_t, (DIM<<1)> &padding_dims = create_array<DimSize_t,(DIM<<1)>(0),
     const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1)) {
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 6e4c54a030c108c29c08a8f5dfdc24d084ccc91c..db92dc9c735416d250fa32e2f9010b21b8f808c0 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -139,7 +139,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
-inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const char* name = nullptr) {
+inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every case
     auto fc = std::make_shared<Node>(std::make_shared<FC_Op>(out_channels, noBias), name);
     addProducer(fc, 1, {out_channels, 1}, "w");
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 94cdc6727de7078ca4fc3bb0940a01731feb92cc..12fb7e16741e9f7ad96d51b0b847b91265c3a7d2 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -163,7 +163,7 @@ class GenericOperator_Op
  * @return std::shared_ptr<Node> Node associated with the Generic Operator.
  */
 inline std::shared_ptr<Node> GenericOperator(const char *type, IOIndex_t nbDataIn, IOIndex_t nbIn, IOIndex_t nbOut,
-                                             const char *name = nullptr) {
+                                             const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<GenericOperator_Op>(type, nbDataIn, nbIn, nbOut), name);
 }
 }  // namespace Aidge
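
Note: only the name parameter migrates in this factory; the type argument is left as const char* by this patch. Illustrative usage:

    auto named = Aidge::GenericOperator("MyOp", 1, 1, 1, "custom_node");
    auto anon = Aidge::GenericOperator("MyOp", 1, 1, 1); // name defaults to ""
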
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 64587d51de784082da455eb64aa5bbe175773b5d..1dff2550a42245351afab5b8bb1a708a8d0d8c0b 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -117,7 +117,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
-inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const char* name = nullptr) {
+inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every case
     return std::make_shared<Node>(std::make_shared<LeakyReLU_Op>(negativeSlope), name);
 }
diff --git a/include/aidge/operator/Matmul.hpp b/include/aidge/operator/Matmul.hpp
index b44e8a9b9540e287ff35af1c9642c8202fd096d0..639b366912060b3e085510f312d94568e6b65f03 100644
--- a/include/aidge/operator/Matmul.hpp
+++ b/include/aidge/operator/Matmul.hpp
@@ -129,7 +129,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
-inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const char* name = nullptr) {
+inline std::shared_ptr<Node> Matmul(DimSize_t out_channels, const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every case
     auto matmul = std::make_shared<Node>(std::make_shared<Matmul_Op>(out_channels), name);
     addProducer(matmul, 1, {1, out_channels}, "w");
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 1f77400ce8a8ef727ea9e0a7d12477c6519ea2df..acdc69b69ab86b25a11d889980b9236e41928316 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -113,32 +113,32 @@ public:
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const char *name = nullptr) {
+inline std::shared_ptr<Node> Producer(const std::array<DimSize_t, DIM> &dims, const std::string& name = "") {
   static_assert(DIM<=MaxDim,"Too many tensor dimensions required by Producer, not supported");
   return std::make_shared<Node>(std::make_shared<Producer_Op>(dims), name);
 }
 
 template <std::size_t DIM>
-inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const char *name = nullptr) {
+inline std::shared_ptr<Node> Producer(DimSize_t const (&dims)[DIM], const std::string& name = "") {
   return Producer(to_array(dims), name);
 }
 
-inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const char *name = nullptr) {
+inline std::shared_ptr<Node> Producer(const std::shared_ptr<Tensor> tensor, const std::string& name = "") {
   return std::make_shared<Node>(std::make_shared<Producer_Op>(tensor), name);
 }
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const char* extension) {
+void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, const std::array<DimSize_t, DIM>& dims, const std::string& extension) {
     assert(inputIdx != gk_IODefaultIndex);
     static_assert(DIM<=MaxDim,"Too many tensor dimensions required by addProducer, not supported");
-    const char* prodName = otherNode->name().empty() ? nullptr : (otherNode->name() + std::string("_") + std::string(extension)).c_str();
+    const std::string prodName = (otherNode->name().empty()) ? "" : (otherNode->name() + std::string("_") + extension);
     auto prod = Producer(dims, prodName);
     prod->addChild(otherNode, 0, inputIdx);
     otherNode->getOperator()->associateInput(inputIdx, prod->getOperator()->getRawOutput(0));
 }
 
 template <std::size_t DIM>
-void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const char* extension) {
+void addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) {
     addProducer(otherNode, inputIdx, to_array(dims), extension);
 }
 } // namespace Aidge
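
Note: beyond the signature change, this hunk fixes a lifetime bug. The old prodName called .c_str() on the temporary std::string built by otherNode->name() + "_" + extension; that temporary is destroyed at the end of the full expression, so prodName dangled before Producer() ever read it. A minimal illustration of the failure mode (standalone sketch, not Aidge code):

    std::string base = "fc1";
    const char* dangling = (base + "_w").c_str(); // the temporary string dies at the semicolon
    // any later use of 'dangling' reads freed storage: undefined behavior
    const std::string safe = base + "_w";         // owning value, as the new code does
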
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 3ea90462cf2b083a1a61ae39be06471093ec9f9f..141bd3ae12c7875a90d2549a24e5c141f3ff6aba 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -106,7 +106,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
-inline std::shared_ptr<Node> ReLU(const char* name = nullptr) {
+inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every case
     return std::make_shared<Node>(std::make_shared<ReLU_Op>(), name);
 }
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 93eb262f703ca7eb385641c77df7ae7e79c00b96..64e713b331bbbbf612ee5102ba0ea82fb108350e 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -106,7 +106,7 @@ public:
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
 };
 
-inline std::shared_ptr<Node> Softmax(const char* name = nullptr) {
+inline std::shared_ptr<Node> Softmax(const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every case
     return std::make_shared<Node>(std::make_shared<Softmax_Op>(), name);
 }
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index d7099e3856d48262f0f4bbacf025f5a960a220fa..3efcf7c5345bbc835aeaf6dcbc416769b8654439 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -23,7 +23,7 @@ namespace Aidge {
 template <std::size_t NUM> void declare_Add(py::module &m) {
   py::class_<Add_Op<NUM>, std::shared_ptr<Add_Op<NUM>>, Operator>(m, "Add_Op", py::multiple_inheritance());
 
-  m.def("Add", &Add<NUM>, py::arg("name") = nullptr);
+  m.def("Add", &Add<NUM>, py::arg("name") = "");
 }
 
 void init_Add(py::module &m) {
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 66dadba7244a199bd4ca8a0dd814f20a8049a62f..ecbb743d33cc5750bc60aeed8e5207dcec0c23dc 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -37,10 +37,10 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
         py::arg("stride_dims"),
         py::arg("padding_dims"));
   
-  m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](std::vector<DimSize_t>& kernel_dims, 
-                                                                  const char* name,
-                                                                  std::vector<DimSize_t> &stride_dims,
-                                                                  std::vector<DimSize_t> &padding_dims) {
+  m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
+                                                                  const std::string& name,
+                                                                  const std::vector<DimSize_t> &stride_dims,
+                                                                  const std::vector<DimSize_t> &padding_dims) {
         // Lambda function wrapper because PyBind fails to convert const arrays,
         // so we use a vector that we convert in this function to a const DimSize_t[DIM] array.
         if (kernel_dims.size() != DIM) {
@@ -69,7 +69,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
         const DimSize_t (&padding_dims_array)[DIM<<1] = tmp_padding_dims_array;
         return AvgPooling<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array));
     }, py::arg("kernel_dims"),
-       py::arg("name") = nullptr,
+       py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0));
   
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 52578c55ac0e3e1112bdbedc15bbaa3e155d9b44..70d9bce003033e1264ac39764271773fa84c760f 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -24,7 +24,7 @@ template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
     py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, PyAbstractParametrizable>(m, ("BatchNorm_Op" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance());
 
-    m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = nullptr);
+    m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
 
 void init_BatchNorm(py::module &m) {
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 3cf5d818f9b6e3bdfaf9a2d0b74ec0480beb6967..7e366305f287e958ea7500695c1f3285908017b1 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -44,11 +44,11 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
   
   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
                                                          DimSize_t out_channels,
-                                                         std::vector<DimSize_t>& kernel_dims,
-                                                         const char* name, 
-                                                         std::vector<DimSize_t> &stride_dims,
-                                                         std::vector<DimSize_t> &padding_dims,
-                                                         std::vector<DimSize_t> &dilation_dims) {
+                                                         const std::vector<DimSize_t>& kernel_dims,
+                                                         const std::string& name, 
+                                                         const std::vector<DimSize_t> &stride_dims,
+                                                         const std::vector<DimSize_t> &padding_dims,
+                                                         const std::vector<DimSize_t> &dilation_dims) {
         // Lambda function wrapper because PyBind fails to convert const arrays,
         // so we use a vector that we convert in this function to a const DimSize_t[DIM] array.
         if (kernel_dims.size() != DIM) {
@@ -87,7 +87,7 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
     }, py::arg("in_channels"),
        py::arg("out_channels"),
        py::arg("kernel_dims"),
-       py::arg("name") = nullptr,
+       py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index b64409bdbb5f094e85cb094017a6fb837893a2db..8a81e7ba184536cbd535db24519495400bce6fdb 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -39,11 +39,11 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         py::arg("padding_dims"),
         py::arg("dilation_dims"));
   
-  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](std::vector<DimSize_t>& kernel_dims, 
-                                                                  const char* name,
-                                                                  std::vector<DimSize_t> &stride_dims,
-                                                                  std::vector<DimSize_t> &padding_dims,
-                                                                  std::vector<DimSize_t> &dilation_dims) {
+  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
+                                                                  const std::string& name,
+                                                                  const std::vector<DimSize_t> &stride_dims,
+                                                                  const std::vector<DimSize_t> &padding_dims,
+                                                                  const std::vector<DimSize_t> &dilation_dims) {
         // Lambda function wrapper because PyBind fails to convert const arrays,
         // so we use a vector that we convert in this function to a const DimSize_t[DIM] array.
         if (kernel_dims.size() != DIM) {
@@ -80,7 +80,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
         const DimSize_t (&dilation_dims_array)[DIM] = tmp_dilation_dims_array;
         return ConvDepthWise<DIM>(to_array(kernel_dims_array), name, to_array(stride_dims_array), to_array(padding_dims_array), to_array(dilation_dims_array));
     }, py::arg("kernel_dims"),
-       py::arg("name") = nullptr,
+       py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("padding_dims") = std::vector<DimSize_t>(DIM<<1,0),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 82eaa0062b7db0e57da3d78d56e503e3a4beb19f..3b4137c6f208f96d256c72300437cc978658b84f 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -23,7 +23,7 @@ namespace Aidge {
 void declare_FC(py::module &m) {
   py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, PyAbstractParametrizable>(m, "FC_Op", py::multiple_inheritance());
 
-  m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = nullptr);
+  m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
 
 void init_FC(py::module &m) {
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 578d2ccd2ed143c3f9a67c0430c12aa7214cb8dc..ee3ee74c14e58dd5160cb041a123c329ab0bbb84 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -62,6 +62,6 @@ void init_GenericOperator(py::module& m) {
     });
 
     m.def("GenericOperator", &GenericOperator, py::arg("type"), py::arg("nbDataIn"), py::arg("nbIn"), py::arg("nbOut"),
-          py::arg("name") = nullptr);
+          py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 27a292f0baf2673f3d963f3c3b9a69892c4c6521..c062d93f5c40fe46336fe34f6d1664f24da07732 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -21,6 +21,6 @@ namespace Aidge {
 void init_LeakyReLU(py::module& m) {
     py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, PyAbstractParametrizable>(m, "LeakyReLU_Op", py::multiple_inheritance());
 
-    m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = nullptr);
+    m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index c81845ca5e5ba3674356d16db660f4e3550e9004..b6ae27289fabe1fe4dbeea60704a61373bc850cf 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -23,7 +23,7 @@ namespace Aidge {
 void declare_Matmul(py::module &m) {
   py::class_<Matmul_Op, std::shared_ptr<Matmul_Op>, Operator, PyAbstractParametrizable>(m, "Matmul_Op", py::multiple_inheritance());
 
-  m.def("Matmul", &Matmul, py::arg("out_channels"), py::arg("name") = nullptr);
+  m.def("Matmul", &Matmul, py::arg("out_channels"), py::arg("name") = "");
 }
 
 void init_Matmul(py::module &m) {
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 5757891a30c5b40dcfa5ff99b1f06e00376f475a..ea9880800059e8993996e67138f89419c165fc4f 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -25,7 +25,7 @@ namespace Aidge {
 template <DimIdx_t DIM>
 void declare_Producer(py::module &m) {
     // m.def(("Producer_" + std::to_string(DIM)+"D").c_str(), py::overload_cast<shared_ptr<Node>&>(&Producer<DIM>), py::arg("dims"), py::arg("name"));
-    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const char*)>(&Producer), py::arg("dims"), py::arg("name") = nullptr);
+    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::array<DimSize_t, DIM>&, const std::string&)>(&Producer), py::arg("dims"), py::arg("name") = "");
     
 }
 
@@ -36,7 +36,7 @@ void init_Producer(py::module &m) {
         "ProducerOp", 
         py::multiple_inheritance())
     .def("dims", &Producer_Op::dims);
-    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const char*)>(&Producer), py::arg("tensor"), py::arg("name") = nullptr);
+    m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&)>(&Producer), py::arg("tensor"), py::arg("name") = "");
     
     declare_Producer<1>(m);
     declare_Producer<2>(m);
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index e0d34d5a91a4ed1fcb8507198eb222b2d02e4e26..820589d76507b39ca65ac2397614aabd1221fe3e 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -20,6 +20,6 @@ namespace Aidge {
 void init_ReLU(py::module& m) {
     py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, Operator>(m, "ReLU_Op", py::multiple_inheritance());
 
-    m.def("ReLU", &ReLU, py::arg("name") = nullptr);
+    m.def("ReLU", &ReLU, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 13ba96ade4f5c5d132274e457efa5b4edcd3dc78..72ac1107181c1d7e2f578e31a965636dbb5c111b 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -21,6 +21,6 @@ namespace Aidge {
 void init_Softmax(py::module& m) {
     py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Operator>(m, "Softmax_Op", py::multiple_inheritance());
 
-    m.def("Softmax", &Softmax, py::arg("name") = nullptr);
+    m.def("Softmax", &Softmax, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/src/graph/Connector.cpp b/src/graph/Connector.cpp
index f189b92b24cc5529ae8fb6d8c9faac97e296a92c..cd2ceff8b58076a5054269e4676120b94c8b5beb 100644
--- a/src/graph/Connector.cpp
+++ b/src/graph/Connector.cpp
@@ -39,7 +39,7 @@ std::shared_ptr<Aidge::GraphView> Aidge::generateGraph(std::vector<Connector> ct
             graph->add(nodesToAdd.back());  // only add, connection already done
                                             // between nodes
             std::vector<std::shared_ptr<Node>> parents = nodesToAdd.back()->getParents();
-            std::set<std::shared_ptr<Node>> alreadyAdded = graph->getNodes();
+            const std::set<std::shared_ptr<Node>>& alreadyAdded = graph->getNodes();
             for (std::shared_ptr<Node> parent : parents) {
                 if (alreadyAdded.find(parent) == alreadyAdded.end()) {
                     buffer.push_back(parent);
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index ad412f5b86d9cf0dee0823736548baeb7c7320a7..a0641032281c6bedb4459a0d08da1193d6375129 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -464,13 +464,13 @@ Aidge::GraphView::getChildren(const std::shared_ptr<Node> otherNode) const {
 
 
 std::shared_ptr<Aidge::Node>
-Aidge::GraphView::getNode(const char *nodeName) const {
+Aidge::GraphView::getNode(const std::string& nodeName) const {
   std::map<std::string, std::shared_ptr<Node>>::const_iterator it =
-      mNodeRegistry.find(std::string(nodeName));
+      mNodeRegistry.find(nodeName);
   if (it != mNodeRegistry.end()) {
     return it->second;
   } else {
-    printf("No Node named %s in the current GraphView.\n", nodeName);
+    printf("No Node named %s in the current GraphView.\n", nodeName.c_str());
     exit(-1);
   }
 }
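
Note: existing call sites are unaffected by the new getNode signature, and the lookup no longer builds a temporary std::string inside the function. A hedged usage sketch (graph is a placeholder std::shared_ptr<GraphView>):

    std::shared_ptr<Aidge::Node> a = graph->getNode("conv1"); // the literal converts to std::string
    const std::string key = "conv1";
    std::shared_ptr<Aidge::Node> b = graph->getNode(key);     // passed by reference, no copy
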
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index b3db5befbdc8299114514d8d554d439bffc5eae2..5fcc0e1139d8ccd9368eaba90231fb12370e761e 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -17,8 +17,8 @@
 #include <vector>
 #include "aidge/utils/Types.h"
 
-Aidge::Node::Node(std::shared_ptr<Operator> op, const char *name)
-    : mName((name == nullptr) ? std::string() : std::string(name)),
+Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
+    : mName(name),
       mOperator(op),
       mParents(std::vector<std::shared_ptr<Node>>(static_cast<std::size_t>(op->nbInputs()), nullptr)),
       mChildren(std::vector<std::vector<std::weak_ptr<Node>>>(static_cast<std::size_t>(op->nbOutputs()),
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index fce46397ffd286a2ddbe254752b241578415e3d8..a8069fda9a3a2f4cbb999eeb3974230767069fb8 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -20,7 +20,7 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/utils/Types.h"
 
-void drawProgressBar(double progress, int barWidth, const char* additionalInfo = nullptr) {
+void drawProgressBar(double progress, int barWidth, const std::string& additionalInfo = "") {
     putchar('[');
     int pos = static_cast<int>(barWidth * progress);
     for (int i = 0; i < barWidth; ++i) {
@@ -29,7 +29,7 @@ void drawProgressBar(double progress, int barWidth, const char* additionalInfo =
         else
             putchar(' ');
     }
-    printf("] %d%% | %s\r", static_cast<int>(progress * 100), (additionalInfo ? additionalInfo : ""));
+    printf("] %d%% | %s\r", static_cast<int>(progress * 100), additionalInfo.c_str());
     fflush(stdout);
 }
 
@@ -122,8 +122,7 @@ void Aidge::SequentialScheduler::forward(bool frowardDims, bool verbose) {
             else
                 drawProgressBar(static_cast<float>(computationOver.size()) / static_cast<float>(computationNumber), 50,
                                 (std::string("running ") + runnable->type() + "_" +
-                                 std::to_string(reinterpret_cast<uintptr_t>(runnable.get())))
-                                        .c_str());
+                                 std::to_string(reinterpret_cast<uintptr_t>(runnable.get()))));
             const auto tStart = std::chrono::high_resolution_clock::now();
             runnable->forward();
             const auto tEnd = std::chrono::high_resolution_clock::now();
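
Note: with additionalInfo taken as const std::string&, the composed string above is now passed directly. The removed .c_str() call was safe only because the temporary outlived the call expression; passing std::string eliminates that footgun entirely. A simplified sketch of the call:

    drawProgressBar(0.5, 50, std::string("running ") + "Conv_0x1234"); // no trailing .c_str() needed
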