From 2d4f518a7611961890267f1d322ab87b78a8a7c5 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Tue, 17 Oct 2023 08:50:48 +0000
Subject: [PATCH] [Operator] Add getInputsName & getOutputsName methods.

---
 include/aidge/operator/Add.hpp                   |  6 ++++++
 include/aidge/operator/AvgPooling.hpp            |  6 ++++++
 include/aidge/operator/BatchNorm.hpp             |  6 ++++++
 include/aidge/operator/Conv.hpp                  |  6 ++++++
 include/aidge/operator/ConvDepthWise.hpp         |  6 ++++++
 include/aidge/operator/FC.hpp                    |  8 +++++++-
 include/aidge/operator/LeakyReLU.hpp             |  6 ++++++
 include/aidge/operator/MatMul.hpp                |  6 ++++++
 include/aidge/operator/MaxPooling.hpp            |  6 ++++++
 include/aidge/operator/Operator.hpp              |  6 ++++++
 include/aidge/operator/Pad.hpp                   |  6 ++++++
 include/aidge/operator/Producer.hpp              |  8 +++++++-
 include/aidge/operator/ReLU.hpp                  |  6 ++++++
 include/aidge/operator/Scaling.hpp               |  6 ++++++
 include/aidge/operator/Softmax.hpp               |  6 ++++++
 python_binding/operator/pybind_Add.cpp           |  4 +++-
 python_binding/operator/pybind_AvgPooling.cpp    | 16 +++++++++-------
 python_binding/operator/pybind_BatchNorm.cpp     |  4 +++-
 python_binding/operator/pybind_Conv.cpp          | 14 ++++++++------
 python_binding/operator/pybind_ConvDepthWise.cpp | 14 ++++++++------
 python_binding/operator/pybind_FC.cpp            |  4 +++-
 python_binding/operator/pybind_LeakyReLU.cpp     |  4 +++-
 python_binding/operator/pybind_Matmul.cpp        |  4 +++-
 python_binding/operator/pybind_MaxPooling.cpp    | 16 +++++++++-------
 python_binding/operator/pybind_Producer.cpp      |  4 +++-
 python_binding/operator/pybind_ReLU.cpp          |  4 +++-
 python_binding/operator/pybind_Softmax.cpp       |  4 +++-
 27 files changed, 150 insertions(+), 36 deletions(-)

diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 57403270d..65c7e8ce0 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -162,6 +162,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return NUM; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return NUM; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input_0", "data_input_n"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 template <std::size_t NUM>
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index be15ceb66..36de6c11a 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -157,6 +157,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 75c901a1f..da7360c8b 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -160,6 +160,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 5; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "scale", "shift", "mean", "variance"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 template <DimSize_t DIM>
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index c8e229cbb..5e6374c48 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -177,6 +177,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 3; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "weight", "bias"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 55a48a978..ec8ce2b3e 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -176,6 +176,12 @@ class ConvDepthWise_Op : public Operator,
     inline IOIndex_t nbInputs() const noexcept override final { return 3; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "weight", "bias"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 8dea03ca0..b949527c5 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -158,6 +158,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 3; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "weight", "bias"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 inline std::shared_ptr<Node> FC(DimSize_t out_channels, bool noBias = false, const std::string& name = "") {
@@ -175,4 +181,4 @@ const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
                                                         "NoBias"};
 }
 
-#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 7a6fc4cbb..40d9959b3 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -137,6 +137,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "") {
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index eec7072ff..eed1ec045 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -148,6 +148,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 2; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "weight"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 inline std::shared_ptr<Node> MatMul(DimSize_t out_channels, const std::string& name = "") {
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index bf802238c..e261fb4b8 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -158,6 +158,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index e3544171d..a99e4e8ed 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -116,6 +116,12 @@ public:
     virtual IOIndex_t nbInputs() const noexcept = 0;
     virtual IOIndex_t nbDataInputs() const noexcept = 0;
     virtual IOIndex_t nbOutputs() const noexcept = 0;
+    static const std::vector<std::string> getInputsName(){
+        return {};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {};
+    }
 };
 } // namespace Aidge
 
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index deae0e6b8..ddc611a0f 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -160,6 +160,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 07d932bd0..529a37c06 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -79,7 +79,7 @@ public:
      * @brief Set the Output Tensor of the Producer operator.
      * This method will create a copy of the Tensor.
      *
-     * @param newOutput Tensor containing the values to copy 
+     * @param newOutput Tensor containing the values to copy
      */
     void setOutputTensor(const Tensor& newOutput) {
         *mOutput = newOutput;
@@ -132,6 +132,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 0; };
     inline IOIndex_t nbDataInputs() const noexcept override final { return 0; };
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; };
+    static const std::vector<std::string> getInputsName(){
+        return {};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 
 public:
   void forward() override final {
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 0a7ec3b4f..07f79f649 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -125,6 +125,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index f18abaf32..353666fb3 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -146,6 +146,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f, const std::string& name = "") {
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 095ea0aad..e6c9869a5 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -125,6 +125,12 @@ public:
     inline IOIndex_t nbInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbDataInputs() const noexcept override final { return 1; }
     inline IOIndex_t nbOutputs() const noexcept override final { return 1; }
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
 };
 
 inline std::shared_ptr<Node> Softmax(const std::string& name = "") {
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index ab8b4cf7b..0b2323c5c 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -20,7 +20,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <std::size_t NUM> void declare_Add(py::module &m) {
-  py::class_<Add_Op<NUM>, std::shared_ptr<Add_Op<NUM>>, Operator>(m, "Add_Op", py::multiple_inheritance());
+  py::class_<Add_Op<NUM>, std::shared_ptr<Add_Op<NUM>>, Operator>(m, "AddOp", py::multiple_inheritance())
+  .def("get_inputs_name", &Add_Op<NUM>::getInputsName)
+  .def("get_outputs_name", &Add_Op<NUM>::getOutputsName);
 
   m.def("Add", &Add<NUM>, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 5820e94c5..a2eda1ab4 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -32,13 +32,15 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
-        py::arg("stride_dims"));
-  
-  m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
+        py::arg("stride_dims"))
+  .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
+  .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName);
+
+  m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
-        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
+        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
         if (kernel_dims.size() != DIM) {
             throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
@@ -59,7 +61,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1));
-  
+
 }
 
 
@@ -67,10 +69,10 @@ void init_AvgPooling(py::module &m) {
   declare_AvgPoolingOp<1>(m);
   declare_AvgPoolingOp<2>(m);
   declare_AvgPoolingOp<3>(m);
- 
+
   // FIXME:
   // m.def("AvgPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
   // (&)[1])>(&AvgPooling));
 }
 } // namespace Aidge
-#endif
\ No newline at end of file
+#endif
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index f43381fec..cabaa2edd 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -21,7 +21,9 @@ namespace Aidge {
 
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, Attributes>(m, ("BatchNorm_Op" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance());
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Operator, Attributes>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
+    .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
+    .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName);
 
     m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 91ede7b6a..aabbfd3dd 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -37,16 +37,19 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
         py::arg("out_channels"),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("dilation_dims"));
-  
+        py::arg("dilation_dims"))
+    .def("get_inputs_name", &Conv_Op<DIM>::getInputsName)
+    .def("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
+    ;
+
   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
                                                          DimSize_t out_channels,
                                                          const std::vector<DimSize_t>& kernel_dims,
-                                                         const std::string& name, 
+                                                         const std::string& name,
                                                          const std::vector<DimSize_t> &stride_dims,
                                                          const std::vector<DimSize_t> &dilation_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
-        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
+        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
         if (kernel_dims.size() != DIM) {
             throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
@@ -78,7 +81,6 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
-  
 }
 
 
@@ -86,7 +88,7 @@ void init_Conv(py::module &m) {
   declare_ConvOp<1>(m);
   declare_ConvOp<2>(m);
   declare_ConvOp<3>(m);
- 
+
   // FIXME:
   // m.def("Conv1D", static_cast<NodeAPI(*)(const char*, int, int, int const
   // (&)[1])>(&Conv));
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index 446bcdcce..809a7d6e7 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -34,14 +34,16 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
                 const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
         py::arg("stride_dims"),
-        py::arg("dilation_dims"));
-  
-  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
+        py::arg("dilation_dims"))
+  .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
+  .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName);
+
+  m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
                                                                   const std::vector<DimSize_t> &dilation_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
-        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
+        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
         if (kernel_dims.size() != DIM) {
             throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
@@ -71,7 +73,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1),
        py::arg("dilation_dims") = std::vector<DimSize_t>(DIM,1));
-  
+
 }
 
 
@@ -79,7 +81,7 @@ void init_ConvDepthWise(py::module &m) {
   declare_ConvDepthWiseOp<1>(m);
   declare_ConvDepthWiseOp<2>(m);
   declare_ConvDepthWiseOp<3>(m);
- 
+
   // FIXME:
   // m.def("ConvDepthWise1D", static_cast<NodeAPI(*)(const char*, int, int, int const
   // (&)[1])>(&ConvDepthWise));
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index 4b9d61d08..c6a1c7000 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -20,7 +20,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 void declare_FC(py::module &m) {
-  py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, Attributes>(m, "FC_Op", py::multiple_inheritance());
+  py::class_<FC_Op, std::shared_ptr<FC_Op>, Operator, Attributes>(m, "FCOp", py::multiple_inheritance())
+  .def("get_inputs_name", &FC_Op::getInputsName)
+  .def("get_outputs_name", &FC_Op::getOutputsName);
 
   m.def("FC", &FC, py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index cae8a88ba..af7689f0e 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -18,7 +18,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_LeakyReLU(py::module& m) {
-    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, Attributes>(m, "LeakyReLU_Op", py::multiple_inheritance());
+    py::class_<LeakyReLU_Op, std::shared_ptr<LeakyReLU_Op>, Operator, Attributes>(m, "LeakyReLUOp", py::multiple_inheritance())
+    .def("get_inputs_name", &LeakyReLU_Op::getInputsName)
+    .def("get_outputs_name", &LeakyReLU_Op::getOutputsName);
 
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index 2f7385500..fdb51b24a 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -20,7 +20,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 void declare_MatMul(py::module &m) {
-  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Operator, Attributes>(m, "MatMul_Op", py::multiple_inheritance());
+  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Operator, Attributes>(m, "MatMulOp", py::multiple_inheritance())
+  .def("get_inputs_name", &MatMul_Op::getInputsName)
+  .def("get_outputs_name", &MatMul_Op::getOutputsName);
 
   m.def("MatMul", &MatMul, py::arg("out_channels"), py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index a930b496b..84313f298 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -32,13 +32,15 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &>(),
         py::arg("kernel_dims"),
-        py::arg("stride_dims"));
-  
-  m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims, 
+        py::arg("stride_dims"))
+  .def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
+  .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName);
+
+  m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims) {
         // Lambda function wrapper because PyBind fails to convert const array.
-        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array. 
+        // So we use a vector that we convert in this function to a const DimeSize_t [DIM] array.
         if (kernel_dims.size() != DIM) {
             throw std::runtime_error("kernel_dims size [" + std::to_string(kernel_dims.size()) + "] does not match DIM [" + std::to_string(DIM) +"]");
         }
@@ -59,7 +61,7 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
     }, py::arg("kernel_dims"),
        py::arg("name") = "",
        py::arg("stride_dims") = std::vector<DimSize_t>(DIM,1));
-  
+
 }
 
 
@@ -67,10 +69,10 @@ void init_MaxPooling(py::module &m) {
   declare_MaxPoolingOp<1>(m);
   declare_MaxPoolingOp<2>(m);
   declare_MaxPoolingOp<3>(m);
- 
+
   // FIXME:
   // m.def("MaxPooling1D", static_cast<NodeAPI(*)(const char*, int, int, int const
   // (&)[1])>(&MaxPooling));
 }
 } // namespace Aidge
-#endif
\ No newline at end of file
+#endif
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 1c62cd0ad..107b7ba00 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -35,7 +35,9 @@ void init_Producer(py::module &m) {
         "ProducerOp",
         py::multiple_inheritance())
     .def("dims", &Producer_Op::dims)
-    .def("set_output_tensor", &Producer_Op::setOutputTensor);
+    .def("set_output_tensor", &Producer_Op::setOutputTensor)
+    .def("get_inputs_name", &Producer_Op::getInputsName)
+    .def("get_outputs_name", &Producer_Op::getOutputsName);
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&)>(&Producer), py::arg("tensor"), py::arg("name") = "");
 
     declare_Producer<1>(m);
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index 820589d76..dbcb483e8 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -18,7 +18,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_ReLU(py::module& m) {
-    py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, Operator>(m, "ReLU_Op", py::multiple_inheritance());
+    py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, Operator>(m, "ReLUOp", py::multiple_inheritance())
+    .def("get_inputs_name", &ReLU_Op::getInputsName)
+    .def("get_outputs_name", &ReLU_Op::getOutputsName);
 
     m.def("ReLU", &ReLU, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 72ac11071..8e50ab7c8 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -19,7 +19,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Softmax(py::module& m) {
-    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Operator>(m, "Softmax_Op", py::multiple_inheritance());
+    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, Operator>(m, "SoftmaxOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Softmax_Op::getInputsName)
+    .def("get_outputs_name", &Softmax_Op::getOutputsName);
 
     m.def("Softmax", &Softmax, py::arg("name") = "");
 }
-- 
GitLab