diff --git a/aidge_export_cpp/operators.py b/aidge_export_cpp/operators.py
index f0b09ba05254fc3f55c1d2cd658330a5cc8d097b..7d2600fcec0bc255d5eb04fb723b193a91db31a3 100644
--- a/aidge_export_cpp/operators.py
+++ b/aidge_export_cpp/operators.py
@@ -71,6 +71,7 @@ class ProducerCPP(ExportNode):
 
     def __init__(self, node):
         super().__init__(node)
+        self.constant = self.operator.attr.constant
         self.values = np.array(self.operator.get_output(0))
 
         if len(self.values.shape) == 4: # Note: export in HWC
@@ -125,7 +126,7 @@ class ReLUCPP(ExportNode):
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "activation_forward.jinja"),
             name=self.name,
-            input_name=f"{self.name}_0" if self.inputs[0] is None else self.inputs[0].name(),
+            input_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input",
             output_name=self.name
         ))
         return list_actions
@@ -136,9 +137,9 @@ class ConvCPP(ExportNode):
     def __init__(self, node):
         super().__init__(node)
 
-        self.kernel = node.get_operator().get_attr("KernelDims")
-        self.stride = node.get_operator().get_attr("StrideDims")
-        self.dilation = node.get_operator().get_attr("DilationDims")
+        self.kernel = node.get_operator().attr.kernel_dims
+        self.stride = node.get_operator().attr.stride_dims
+        self.dilation = node.get_operator().attr.dilation_dims
 
         # No padding with Conv
         # Use PaddedConv to add padding attribute
@@ -190,7 +191,7 @@ class ConvCPP(ExportNode):
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "convolution_forward.jinja"),
             name=self.name,
-            input_name=f"{self.name}_0" if self.inputs[0] is None else self.inputs[0].name(),
+            input_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input",
             output_name=self.name,
             weights_name=self.inputs[1].name(),
             biases_name=self.inputs[2].name()
@@ -205,11 +206,11 @@ class PaddedConvCPP(ConvCPP):
 
         for n in self.operator.get_micro_graph().get_nodes():
             if n.type() == "Pad":
-                self.padding = n.get_operator().get_attr("BeginEndBorders")
+                self.padding = n.get_operator().attr.begin_end_borders
             if n.type() == "Conv":
-                self.kernel = n.get_operator().get_attr("KernelDims")
-                self.stride = n.get_operator().get_attr("StrideDims")
-                self.dilation = n.get_operator().get_attr("DilationDims")
+                self.kernel = n.get_operator().attr.kernel_dims
+                self.stride = n.get_operator().attr.stride_dims
+                self.dilation = n.get_operator().attr.dilation_dims
 
         if len(self.inputs_dims[0]) == 4:
             # if dims == [batch, nb_channels, height, width]
@@ -250,8 +251,8 @@ class AddCPP(ExportNode):
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja"),
             name=self.name,
-            inputs1_name=f"{self.name}_0" if self.inputs[0] is None else self.inputs[0].name(),
-            inputs2_name=f"{self.name}_0" if self.inputs[1] is None else self.inputs[1].name(),
+            inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
+            inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
             output_name=self.name
         ))
         return list_actions
@@ -283,8 +284,8 @@ class SubCPP(ExportNode):
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja"),
             name=self.name,
-            inputs1_name=f"{self.name}_0" if self.inputs[0] is None else self.inputs[0].name(),
-            inputs2_name=f"{self.name}_1" if self.inputs[1] is None else self.inputs[1].name(),
+            inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
+            inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
             output_name=self.name
         ))
         return list_actions
@@ -296,10 +297,10 @@ class MaxPoolCPP(ExportNode):
         super().__init__(node)
         for n in self.operator.get_micro_graph().get_nodes():
             if n.type() == "Pad":
-                self.padding = n.get_operator().get_attr("BeginEndBorders")
+                self.padding = n.get_operator().attr.begin_end_borders
             if n.type() == "MaxPooling":
-                self.kernel = n.get_operator().get_attr("KernelDims")
-                self.stride = n.get_operator().get_attr("StrideDims")
+                self.kernel = n.get_operator().attr.kernel_dims
+                self.stride = n.get_operator().attr.stride_dims
 
         if len(self.inputs_dims[0]) == 4:
             # if dims == [batch, nb_channels, height, width]
@@ -342,6 +343,6 @@ class MaxPoolCPP(ExportNode):
             str(ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja"),
             name=self.name,
             input_name=f"{self.name}_0" if self.inputs[0] is None else self.inputs[0].name(),
+            inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
+            inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
             output_name=self.name
         ))
         return list_actions
@@ -351,8 +352,8 @@ class MaxPoolCPP(ExportNode):
     def __init__(self, node):
         super().__init__(node)
 
-        self.kernel = node.get_operator().get_attr("KernelDims")
-        self.stride = node.get_operator().get_attr("StrideDims")
+        self.kernel = node.get_operator().attr.kernel_dims
+        self.stride = node.get_operator().attr.stride_dims
 
         # No padding with MaxPooling
         # Use PaddedMaxPooling to add padding attribute
@@ -398,7 +399,7 @@ class MaxPoolCPP(ExportNode):
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja"),
             name=self.name,
-            input_name=f"{self.name}_0" if self.inputs[0] is None else self.inputs[0].name(),
+            input_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input",
             output_name=self.name
         ))
         return list_actions
@@ -451,11 +452,10 @@ class FcCPP(ExportNode):
     def forward(self, list_actions:list):
         if not self.is_last:
             list_actions.append(set_up_output(self.name, "float"))
-
         list_actions.append(generate_str(
             str(ROOT / "templates" / "kernel_forward" / "fullyconnected_forward.jinja"),
             name=self.name,
-            inputs_name=f"{self.name}_0" if self.inputs[0] is None else self.inputs[0].name(),
+            inputs_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input",
             weights_name=self.inputs[1].name(),
             biases_name=self.inputs[2].name(),
             outputs_name=self.name
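
Note: the operators.py hunks above all follow the same two-part migration: the
string-keyed accessor (node.get_operator().get_attr("KernelDims")) becomes the
snake_case attribute namespace (node.get_operator().attr.kernel_dims), and the
input-name fallback switches from an "is None" check to a truthiness check that
synthesizes a "<name>_input" placeholder. A minimal sketch of the post-patch
pattern, assuming an ExportNode subclass like those above (ExampleCPP is an
illustrative name, not part of the patch):

    class ExampleCPP(ExportNode):
        def __init__(self, node):
            super().__init__(node)
            # New accessor: CamelCase keys map to snake_case attributes
            # ("KernelDims" -> kernel_dims, "StrideDims" -> stride_dims).
            self.kernel = node.get_operator().attr.kernel_dims
            self.stride = node.get_operator().attr.stride_dims

        def forward(self, list_actions: list):
            # Fallback naming: reuse the producer's name when input 0 is
            # connected, otherwise synthesize "<node>_input".
            input_name = self.inputs[0].name() if self.inputs[0] else self.name + "_input"
            ...
            return list_actions
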
diff --git a/examples/add_custom_operator/add_custom_operator.ipynb b/examples/add_custom_operator/add_custom_operator.ipynb
index 0687e322831f045a17e729b2706ef518f3db6a7a..5477cdaefa00334a64472810e71c04031c06ab0f 100644
--- a/examples/add_custom_operator/add_custom_operator.ipynb
+++ b/examples/add_custom_operator/add_custom_operator.ipynb
@@ -89,7 +89,7 @@
     "model.add(input_node)\n",
     "\n",
     "# Configuration for the model + forward dimensions\n",
-    "model.compile(\"cpu\", aidge_core.DataType.Float32)"
+    "model.compile(\"cpu\", aidge_core.dtype.float32)"
    ]
   },
   {
@@ -231,9 +231,9 @@
     "    def __init__(self, node):\n",
     "        super().__init__(node)\n",
     "\n",
-    "        self.betas = self.operator.get_attr(\"betas\")\n",
+    "        self.betas: float = self.operator.get_attr(\"betas\")\n",
     "\n",
-    "    def export(self, export_folder:str, list_configs:list):\n",
+    "    def export(self, export_folder: str, list_configs: list[str]) -> list[str]:\n",
     "\n",
     "        copyfile(\"for_export/swish_kernel.hpp\",\n",
     "                 f\"{export_folder}/include/kernels/\")\n",
@@ -248,7 +248,7 @@
     "\n",
     "        return list_configs\n",
     "\n",
-    "    def forward(self, list_actions:list):\n",
+    "    def forward(self, list_actions:list[str]) -> list[str]:\n",
     "\n",
     "        if not self.is_last:\n",
     "            list_actions.append(set_up_output(self.name, \"float\"))\n",
@@ -296,7 +296,7 @@
    "outputs": [],
    "source": [
     "digit = np.load(\"digit.npy\")\n",
-    "cpp.generate_input_file(\"inputs\", digit.reshape(-1), \"myexport/inputs.h\")"
+    "cpp.generate_input_file(array_name=\"inputs\", array=digit.reshape(-1), folder_path=\"myexport\")"
    ]
   },
   {
@@ -334,7 +334,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.1.undefined"
   }
  },
  "nbformat": 4,
diff --git a/examples/export_LeNet/export_lenet_fp32.ipynb b/examples/export_LeNet/export_lenet_fp32.ipynb
index 24fe205a9d781bc6f6adfbe8c9a13a996ca674e7..b10ca6b9dbe4fecf1e56ec04ead0c6c774aaf66c 100644
--- a/examples/export_LeNet/export_lenet_fp32.ipynb
+++ b/examples/export_LeNet/export_lenet_fp32.ipynb
@@ -98,7 +98,7 @@
     "model.add(input_node)\n",
     "\n",
     "# Configuration for the model + forward dimensions\n",
-    "model.compile(\"cpu\", aidge_core.DataType.Float32)"
+    "model.compile(\"cpu\", aidge_core.dtype.float32)"
    ]
   },
   {
@@ -290,7 +290,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.10.14"
   }
  },
  "nbformat": 4,
diff --git a/version.txt b/version.txt
index 6e8bf73aa550d4c57f6f35830f1bcdc7a4a62f38..6da28dde76d6550e3d398a70a9a8231256774669 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.1.0
+0.1.1
\ No newline at end of file