diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 57403270d44d66e87675a3cadb227342c0cacd91..65c7e8ce0e47bd470e2a1499a682ed2f2c8c2dbc 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -162,6 +162,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return NUM; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return NUM; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input_0", "data_input_n"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 template <std::size_t NUM>
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index be15ceb66ce32b98bfafab4af4213eee163dfbf9..36de6c11a50692cc53ce9a70af4bef81ab0924bd 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -157,6 +157,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 75c901a1f00d26cc8b65192815c6fe93575723f0..da7360c8ba3816cdfe1d2d00f80b08808a80f961 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -160,6 +160,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 5; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "scale", "shift", "mean", "variance"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index c8e229cbb3815ae7bd24064e862dc407b327febd..5e6374c488a34fc8b29a5f841f42b8f44d2fc7a6 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -177,6 +177,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 3; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "weight", "bias"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 55a48a978f4bd515f31cff4feae79c3ab262b0e0..ec8ce2b3e1e2961658bd5fce7342fe5a31b7bb5b 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -176,6 +176,12 @@ class ConvDepthWise_Op : public Operator,
     inline IOIndex_t nbInputs() const noexcept override final { return 3; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "weight", "bias"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 8dea03ca0b7cd9ce7543fa35d082bc5164365b7b..b949527c51b9330077dd3bd8f8b4bf1f1b9d719c 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -158,6 +158,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 3; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "weight", "bias"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "") {
@@ -175,4 +181,4 @@ const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
                                                         "NoBias"};
 }
 
-#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 7a6fc4cbb8648b04aa42158c34d022b11775b84c..40d9959b3802dcbe337adeafa9643bf0682df64b 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -137,6 +137,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "") {
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index eec7072ff2739c80bb327f0e987e7d3712ba217e..eed1ec04535aa5896aa3d01a27d8023d37a42183 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -148,6 +148,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 2; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "weight"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 inline std::shared_ptr<Node> MatMul(DimSize_t out_channels, const std::string& name = "") {
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index bf802238c2dba8d13a0bb230750f3b882b6c09f5..e261fb4b8c6d1448cca010f6aa11214bf6597f67 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -158,6 +158,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index e3544171de9b97a2795f1d936adfeff341bd32dc..a99e4e8ed37aeaa647da1dcaaa994b070901129b 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -116,6 +116,12 @@ public:
     virtual IOIndex_t nbInputs() const noexcept = 0;
     virtual IOIndex_t nbDataInputs() const noexcept = 0;
     virtual IOIndex_t nbOutputs() const noexcept = 0;
+    static const std::vector<std::string> getInputsName(){
+        return {};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {};
+    }
 };
 } // namespace Aidge
 
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index deae0e6b8c5a91e5c10e7655549a4e46ac90eb0b..ddc611a0fc6e3604ad6cb1949142d26625ae778a 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -160,6 +160,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 07d932bd0501832c78df2c3530f657b57251183f..529a37c063567e2e09367176437f212c69b2bf40 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -79,7 +79,7 @@ public:
      * @brief Set the Output Tensor of the Producer operator.
      * This method will create a copy of the Tensor.
      *
-     * @param newOutput Tensor containing the values to copy 
+     * @param newOutput Tensor containing the values to copy
      */
     void setOutputTensor(const Tensor& newOutput) {
         *mOutput = newOutput;
@@ -132,6 +132,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 0; };
     inline IOIndex_t nbDataInputs() const noexcept override final { return 0; };
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; };
+    static const std::vector<std::string> getInputsName(){
+        return {};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 
 public:
   void forward() override final {
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 0a7ec3b4fd9b51dbdb7cc95cd111337dad8553c4..07f79f64933b3eec9e98a5ad13a4afbda9aed588 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -125,6 +125,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index f18abaf320620bbffec646d1bbb752b834487dd4..353666fb3950d034a7dbe8ec1d3ebdb312679f95 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -146,6 +146,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::string& name = "") {
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 095ea0aadb9b9684a472b8a437ace6f5151bc4cf..e6c9869a50d1d142c20525ce2ccfc4f1de5088ed 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -125,6 +125,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 inline std::shared_ptr<Node> Softmax(const std::string& name = "") {
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index ab8b4cf7b91d5eea2db5245a8c5122ab004b4766..0b2323c5cfb660415ec3ae009beaa7aa78afca0b 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -20,7 +20,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <std::size_t NUM> void declare_Add(py::module &m) {
-  py::class_<Add_Op<NUM>, std::shared_ptr<Add_Op<NUM>>, Operator>(m, "Add_Op", py::multiple_inheritance());
+  py::class_<Add_Op<NUM>, std::shared_ptr<Add_Op<NUM>>, Operator>(m, "AddOp", py::multiple_inheritance())
+  .def("get_inputs_name", &Add_Op<NUM>::getInputsName)
+  .def("get_outputs_name", &Add_Op<NUM>::getOutputsName);
 
   m.def("Add", &Add<NUM>, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 5820e94c5cbd24150a4e81b0db34328ac35e1bf5..a2eda1ab44f75b2f6a63d0d4d4f19cbee00b07c7 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -32,13 +32,15 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
-        py::arg("stride_dims"));
-  
-  m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
+        py::arg("stride_dims"))
+  .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
+  .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName);
+
+  m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
-        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
+        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
         if (kernel_dims.size() != DIM) {
             throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
@@ -59,7 +61,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1));
-  
+
 }
 
 
@@ -67,10 +69,10 @@ void init_AvgPooling(py::module &m) {
   declare_AvgPoolingOp<1>(m);
   declare_AvgPoolingOp<2>(m);
   declare_AvgPoolingOp<3>(m);
- 
+
   // FIXME:
   // m.def("AvgPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
   // (&)[1])>(&AvgPooling));
 }
 } // namespace Aidge
-#endif
\ No newline at end of file
+#endif
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index f43381fecc689a292e166c4da40ea0cb4842c9e6..cabaa2edd7053718160fa5013492d1914ee4cf16 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -21,7 +21,9 @@ namespace Aidge {
 
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, Attributes>(m, ("BatchNorm_Op" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance());
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, Attributes>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
+    .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
+    .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName);
 
     m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 91ede7b6a289f3def2a9c8261ff04d2ab9836cdd..aabbfd3dd9aecd44123962443458de56c0b7071c 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -37,16 +37,19 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
         py::arg("out_channels"),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("dilation_dims"));
-  
+        py::arg("dilation_dims"))
+    .def("get_inputs_name", &Conv_Op<DIM>::getInputsName)
+    .def("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
+    ;
+
   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
                                                          DimSize_t out_channels,
                                                          const std::vector<DimSize_t>& kernel_dims,
-                                                         const std::string& name, 
+                                                         const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &dilation_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
-        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
+        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
         if (kernel_dims.size() != DIM) {
             throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
@@ -78,7 +81,6 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
-  
 }
 
 
@@ -86,7 +88,7 @@ void init_Conv(py::module &m) {
   declare_ConvOp<1>(m);
   declare_ConvOp<2>(m);
   declare_ConvOp<3>(m);
- 
+
   // FIXME:
   // m.def("Conv1D", static_cast<NodeAPI(*)(const char*, int, int, int const
   // (&)[1])>(&Conv));
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 446bcdcceb3ba805223fc22e6fc19a22dcf354ec..809a7d6e797651ed8c490aa9a886c31c7e4e6651 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -34,14 +34,16 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
                 const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("dilation_dims"));
-  
-  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
+        py::arg("dilation_dims"))
+  .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
+  .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName);
+
+  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
                                                                   const std::vector<DimSize_t> &dilation_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
-        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
+        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
         if (kernel_dims.size() != DIM) {
             throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
@@ -71,7 +73,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
-  
+
 }
 
 
@@ -79,7 +81,7 @@ void init_ConvDepthWise(py::module &m) {
   declare_ConvDepthWiseOp<1>(m);
   declare_ConvDepthWiseOp<2>(m);
   declare_ConvDepthWiseOp<3>(m);
- 
+
   // FIXME:
   // m.def("ConvDepthWise1D", static_cast<NodeAPI(*)(const char*, int, int, int const
   // (&)[1])>(&ConvDepthWise));
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 4b9d61d082ebed4d426b41efa071d3943f83d231..c6a1c70000e3e6d604a6652716667efa1c18e956 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -20,7 +20,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, Attributes>(m, "FC_Op", py::multiple_inheritance());
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, Attributes>(m, "FCOp", py::multiple_inheritance())
+  .def("get_inputs_name", &FC_Op::getInputsName)
+  .def("get_outputs_name", &FC_Op::getOutputsName);
 
   m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index cae8a88bab7b59189dfbc6528cd653f1c97cb73a..af7689f0e64dd4ca8f798dcb34ea968972ace464 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -18,7 +18,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, Attributes>(m, "LeakyReLU_Op", py::multiple_inheritance());
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, Attributes>(m, "LeakyReLUOp", py::multiple_inheritance())
+    .def("get_inputs_name", &LeakyReLU_Op::getInputsName)
+    .def("get_outputs_name", &LeakyReLU_Op::getOutputsName);
 
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index 2f738550041bcdb1ae809d68fa24fdf5a72e9164..fdb51b24a87ce358c1e7808873ebc569ca2227c8 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -20,7 +20,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 void declare_MatMul(py::module &m) {
-  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Operator, Attributes>(m, "MatMul_Op", py::multiple_inheritance());
+  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Operator, Attributes>(m, "MatMulOp", py::multiple_inheritance())
+  .def("get_inputs_name", &MatMul_Op::getInputsName)
+  .def("get_outputs_name", &MatMul_Op::getOutputsName);
 
   m.def("MatMul", &MatMul, py::arg("out_channels"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index a930b496b49280629d71725cee79aea4d850358e..84313f298f90298726773630602e90a5ab3d3efd 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -32,13 +32,15 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
-        py::arg("stride_dims"));
-  
-  m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
+        py::arg("stride_dims"))
+  .def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
+  .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName);
+
+  m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
-        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
+        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
         if (kernel_dims.size() != DIM) {
             throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
@@ -59,7 +61,7 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1));
-  
+
 }
 
 
@@ -67,10 +69,10 @@ void init_MaxPooling(py::module &m) {
   declare_MaxPoolingOp<1>(m);
   declare_MaxPoolingOp<2>(m);
   declare_MaxPoolingOp<3>(m);
- 
+
   // FIXME:
   // m.def("MaxPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
   // (&)[1])>(&MaxPooling));
 }
 } // namespace Aidge
-#endif
\ No newline at end of file
+#endif
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 1c62cd0adf6b8712073ec0674754ce7c8c2014a5..107b7ba00e4077d9f7c215257bf7fd46629481c1 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -35,7 +35,9 @@ void init_Producer(py::module &m) {
         "ProducerOp",
         py::multiple_inheritance())
     .def("dims", &Producer_Op::dims)
-    .def("set_output_tensor", &Producer_Op::setOutputTensor);
+    .def("set_output_tensor", &Producer_Op::setOutputTensor)
+    .def("get_inputs_name", &Producer_Op::getInputsName)
+    .def("get_outputs_name", &Producer_Op::getOutputsName);
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&)>(&Producer), py::arg("tensor"), py::arg("name") = "");
 
     declare_Producer<1>(m);
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index 820589d76507b39ca65ac2397614aabd1221fe3e..dbcb483e8089373bc8599c2d09fed00049e2a2ac 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -18,7 +18,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_ReLU(py::module& m) {
-    py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, Operator>(m, "ReLU_Op", py::multiple_inheritance());
+    py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, Operator>(m, "ReLUOp", py::multiple_inheritance())
+    .def("get_inputs_name", &ReLU_Op::getInputsName)
+    .def("get_outputs_name", &ReLU_Op::getOutputsName);
 
     m.def("ReLU", &ReLU, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 72ac1107181c1d7e2f578e31a965636dbb5c111b..8e50ab7c83bf43285b357cb803c0ce3eb42f4cc7 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -19,7 +19,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Softmax(py::module& m) {
-    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Operator>(m, "Softmax_Op", py::multiple_inheritance());
+    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Operator>(m, "SoftmaxOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Softmax_Op::getInputsName)
+    .def("get_outputs_name", &Softmax_Op::getOutputsName);
 
     m.def("Softmax", &Softmax, py::arg("name") = "");
 }