diff --git a/aidge_export_cpp/kernels/pad.hpp b/aidge_export_cpp/kernels/pad.hpp
index 158c9359c934f2735c12e47cbd186a4f30715554..4e83257c1152b1963dd4b0eefc912216a729de7d 100644
--- a/aidge_export_cpp/kernels/pad.hpp
+++ b/aidge_export_cpp/kernels/pad.hpp
@@ -6,32 +6,44 @@
 
 // Todo add border value and border type (Reflect, Constant, Wrap...) and add the two missing pad value (bottom and right)
 
-template<int NB_CHANNELS,
+template<int NB_BATCHES, int NB_CHANNELS,
          int CHANNELS_HEIGHT, int CHANNELS_WIDTH,
          int NB_OUTPUTS,
          int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
-         int PADDING_Y, int PADDING_X,
+         int PADDING_TOP,
+         int PADDING_LEFT,
+         int PADDING_BOTTOM,
+         int PADDING_RIGHT,
          typename Input_T, typename Output_T>
 __attribute__((always_inline)) inline
-void convolution_forward(
+void pad_forward(
+    double borderValue,
     const Input_T* __restrict inputs,
     Output_T* __restrict outputs
     )
 {
-    const unsigned int oySize = CHANNELS_HEIGHT + PADDING_Y + PADDING_Y;
-    const unsigned int oxSize = CHANNELS_WIDTH + PADDING_X + PADDING_X;
-
-    for (unsigned int oy = 0; oy < oySize; ++oy) {
-        for (unsigned int ox = 0; ox < oxSize; ++ox) {
-            if (oy < PADDING_Y or oy >= CHANNELS_HEIGHT + PADDING_Y or ox < PADDING_X or ox >= CHANNELS_WIDTH + PADDING_X)
-            {
-                outputs[oy * oySize + ox] = 0.0f;
-            }
-            else
-            {
-                outputs[oy * oySize + ox] = inputs[(oy - PADDING_Y) * CHANNELS_HEIGHT + (ox - PADDING_X)];
-            }
+    const unsigned int oySize = CHANNELS_HEIGHT + PADDING_TOP + PADDING_BOTTOM;
+    const unsigned int oxSize = CHANNELS_WIDTH + PADDING_LEFT + PADDING_RIGHT;
+
+    for (unsigned int batch = 0; batch < NB_BATCHES; ++batch) {
+        for (unsigned int ch = 0; ch < NB_CHANNELS; ++ch) {
+            const unsigned int preIndex = batch * NB_CHANNELS * CHANNELS_HEIGHT * CHANNELS_WIDTH + ch * CHANNELS_HEIGHT * CHANNELS_WIDTH;
+
+            for (unsigned int oy = 0; oy < oySize; ++oy) {
+                for (unsigned int ox = 0; ox < oxSize; ++ox) {
+                    const unsigned int outIndex = batch * NB_CHANNELS * oySize * oxSize + ch * oySize * oxSize + oy * oxSize + ox;
 
+                    outputs[outIndex] = static_cast<Output_T>(borderValue);
+
+                    const int inputX = static_cast<int>(ox) - PADDING_LEFT;
+                    const int inputY = static_cast<int>(oy) - PADDING_TOP;
+
+                    if (inputY >= 0 and inputY < CHANNELS_HEIGHT and inputX >= 0 and inputX < CHANNELS_WIDTH)
+                    {
+                        outputs[outIndex] = inputs[preIndex + inputY * CHANNELS_WIDTH + inputX];
+                    }
+                }
+            }
         }
     }
 }
diff --git a/aidge_export_cpp/operators.py b/aidge_export_cpp/operators.py
index 5abb137d9431e2b21b83ecacd5329ebc496c61f4..a6ad95d26c0d7649611a6940646c7b4cd72364e5 100644
--- a/aidge_export_cpp/operators.py
+++ b/aidge_export_cpp/operators.py
@@ -73,10 +73,16 @@ class ProducerCPP(ExportNode):
 
 # TODO : find a way to remove this dummy exportnode
 @ExportLibCpp.register("Pad2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.any)))
-class Pad_ARMCortexM(ExportNodeCpp):
+class PadCPP(ExportNodeCpp):
     def __init__(self, node, mem_info):
         super().__init__(node, mem_info)
         self.attributes["padding"] = node.get_operator().attr.begin_end_borders
+        self.attributes["border_type"] = node.get_operator().attr.border_type
+        self.attributes["border_value"] = node.get_operator().attr.border_value
+
+        assert self.attributes["border_type"] == aidge_core.pad_border_type.Constant, (
+            f"export Pad2d: border_type == {node.get_operator().attr.border_type} not implemented"
+        )
 
         self.config_template = str(
             ROOT / "templates" / "configuration" / "pad_config.jinja")
@@ -87,8 +93,6 @@ class Pad_ARMCortexM(ExportNodeCpp):
             str(ROOT / "kernels" / "pad.hpp")
         ]
 
-
-
 @ExportLibCpp.register("ReLU", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class ReLUCPP(ExportNodeCpp):
     def __init__(self, node, mem_info):
diff --git a/aidge_export_cpp/templates/configuration/pad_config.jinja b/aidge_export_cpp/templates/configuration/pad_config.jinja
index 527e5c08e4e70370616e0794f3508f5c62583124..8b21577fe4d6f52ddb36ae796740f265db3d45cc 100644
--- a/aidge_export_cpp/templates/configuration/pad_config.jinja
+++ b/aidge_export_cpp/templates/configuration/pad_config.jinja
@@ -4,7 +4,10 @@
 {# For layer configuration -#}
 {% include "./_def_io.jinja" %}
 {% include "./_meminfo.jinja" %}
-#define {{ name|upper }}_PADDING_Y {{ padding[1] }}
-#define {{ name|upper }}_PADDING_X {{ padding[0] }}
+#define {{ name|upper }}_PADDING_BOTTOM {{ padding[2] }}
+#define {{ name|upper }}_PADDING_RIGHT {{ padding[3] }}
+#define {{ name|upper }}_PADDING_TOP {{ padding[0] }}
+#define {{ name|upper }}_PADDING_LEFT {{ padding[1] }}
+#define {{ name|upper }}_BORDER_VALUE {{ border_value }}
 
 #endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_cpp/templates/kernel_forward/pad_forward.jinja b/aidge_export_cpp/templates/kernel_forward/pad_forward.jinja
index 04976e9fbac4a268c02a94f6af6f846f50f783ae..721418709f589d56723156797d7e45afe1259a7b 100644
--- a/aidge_export_cpp/templates/kernel_forward/pad_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/pad_forward.jinja
@@ -1,13 +1,16 @@
 {% filter indent(width=4, first=False) %}
 {% include "./_mem_offset.jinja" %}
-convolution_forward<{{ in_name[0]|upper }}_NB_CHANNELS,
-                    {{ in_name[0]|upper }}_IN_HEIGHT,
-                    {{ in_name[0]|upper }}_IN_WIDTH,
-                    {{ out_name[0]|upper }}_NB_OUTPUTS,
-                    {{ out_name[0]|upper }}_OUT_HEIGHT,
-                    {{ out_name[0]|upper }}_OUT_WIDTH,
-                    {{name|upper}}_PADDING_Y,
-                    {{name|upper}}_PADDING_X>
-                    ({{in_name[0]}}, {{out_name[0]}});
+pad_forward<{{ in_name[0]|upper }}_IN_BATCH,
+            {{ in_name[0]|upper }}_NB_CHANNELS,
+            {{ in_name[0]|upper }}_IN_HEIGHT,
+            {{ in_name[0]|upper }}_IN_WIDTH,
+            {{ out_name[0]|upper }}_NB_OUTPUTS,
+            {{ out_name[0]|upper }}_OUT_HEIGHT,
+            {{ out_name[0]|upper }}_OUT_WIDTH,
+            {{name|upper}}_PADDING_TOP,
+            {{name|upper}}_PADDING_LEFT,
+            {{name|upper}}_PADDING_BOTTOM,
+            {{name|upper}}_PADDING_RIGHT>
+            ({{name|upper}}_BORDER_VALUE, {{in_name[0]}}, {{out_name[0]}});
 {% include "./_save_outputs.jinja" %}
 {% endfilter %}
diff --git a/aidge_export_cpp/unit_tests/test_export.py b/aidge_export_cpp/unit_tests/test_export.py
index 273c9b4b0481771a2f90a8a6818787e37d28efd4..9ee6443ab6b5143b4df5c6ae373adf4067fa3f6d 100644
--- a/aidge_export_cpp/unit_tests/test_export.py
+++ b/aidge_export_cpp/unit_tests/test_export.py
@@ -211,6 +211,55 @@ class test_operator_export(unittest.TestCase):
 
         self.unit_test_export(model, "Mul", [[1, 5, 5]])
 
+    def test_export_mul_larger(self):
+        print("MulLarger")
+        model = aidge_core.sequential([
+            aidge_core.Producer([1, 7, 5], name="producer"),
+            aidge_core.Mul(name="mul")
+        ])
+
+        self.unit_test_export(model, "Mul", [[1, 7, 5]])
+
+    def test_export_mul_higher(self):
+        print("MulHigher")
+        model = aidge_core.sequential([
+            aidge_core.Producer([1, 5, 7], name="producer"),
+            aidge_core.Mul(name="mul")
+        ])
+
+        self.unit_test_export(model, "Mul", [[1, 5, 7]])
+
+    # "Broadcast not supported yet in export operator"
+    @unittest.expectedFailure
+    def test_export_mul_simple_broadcast(self):
+        print("MulSimpleBroadcast")
+        model = aidge_core.sequential([
+            aidge_core.Producer([1, 1, 5], name="producer"),
+            aidge_core.Mul(name="mul")
+        ])
+
+        self.unit_test_export(model, "MulSimpleBroadcast", [[1, 7, 5]])
+
+    # "Broadcast not supported yet in export operator"
+    @unittest.expectedFailure
+    def test_export_mul_double_broadcast(self):
+        print("MulDoubleBroadcast")
+        model = aidge_core.sequential([
+            aidge_core.Producer([1, 1, 7], name="producer"),
+            aidge_core.Mul(name="mul")
+        ])
+
+        self.unit_test_export(model, "MulDoubleBroadcast", [[1, 5, 1]])
+
+    def test_export_mul_batch(self):
+        print("MulBatch")
+        model = aidge_core.sequential([
+            aidge_core.Producer([3, 5, 7], name="producer"),
+            aidge_core.Mul(name="mul")
+        ])
+
+        self.unit_test_export(model, "MulBatch", [[3, 5, 7]])
+
     def test_export_concat(self):
         print("Concat")
         model = aidge_core.sequential([
@@ -250,7 +299,56 @@ class test_operator_export(unittest.TestCase):
             aidge_core.Pad2D((1, 1, 1, 1), name="pad2d")
         ])
 
-        self.unit_test_export(model, "Pad2D", [[1, 1, 10, 10]])
+        self.unit_test_export(model, "Pad2D", [[1, 1, 11, 11]])
+
+    def test_export_pad2D_larger(self):
+        print("Pad2DLarger")
+        model = aidge_core.sequential([
+            aidge_core.Pad2D((1, 3, 1, 3), name="pad2d")
+        ])
+
+        self.unit_test_export(model, "Pad2DLarger", [[1, 1, 7, 11]])
+
+    def test_export_pad2D_higher(self):
+        print("Pad2DHigher")
+        model = aidge_core.sequential([
+            aidge_core.Pad2D((3, 1, 3, 1), name="pad2d")
+        ])
+
+        self.unit_test_export(model, "Pad2DHigher", [[1, 1, 11, 7]])
+
+    def test_export_pad2D_mismatch(self):
+        print("Pad2DMismatch")
+        model = aidge_core.sequential([
+            aidge_core.Pad2D((1, 3, 5, 7), name="pad2d")
+        ])
+
+        self.unit_test_export(model, "Pad2DMismatch", [[3, 5, 11, 7]])
+
+    def test_export_pad2D_denser(self):
+        print("Pad2DDenser")
+        model = aidge_core.sequential([
+            aidge_core.Pad2D((3, 3, 3, 3), name="pad2d")
+        ])
+
+        self.unit_test_export(model, "Pad2DDenser", [[1, 5, 7, 11]])
+
+    def test_export_pad2D_with_bigger_batch_size(self):
+        print("Pad2DBiggerBatchSize")
+        model = aidge_core.sequential([
+            aidge_core.Pad2D((1, 1, 1, 1), name="pad2d")
+        ])
+
+        self.unit_test_export(model, "Pad2DBiggerBatchSize", [[3, 5, 7, 11]])
+
+    @unittest.expectedFailure
+    def test_export_pad2D_not_constant(self):
+        print("Pad2DNotConstant")
+        model = aidge_core.sequential([
+            aidge_core.Pad2D((3, 3, 3, 3), border_type=aidge_core.pad_border_type.Wrap, name="pad2d")
+        ])
+
+        self.unit_test_export(model, "Pad2DNotConstant", [[1, 5, 7, 11]])
 
     def test_export_batchnorm2D(self):
         print("BatchNormalization2D")
@@ -260,6 +358,38 @@ class test_operator_export(unittest.TestCase):
 
         self.unit_test_export(model, "BatchNorm2D", [[1, 1, 5, 5]], False, False)
 
+    def test_export_batchnorm2D_Larger(self):
+        print("BatchNormalization2DLarger")
+        model = aidge_core.sequential([
+            aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
+        ])
+
+        self.unit_test_export(model, "BatchNorm2DLarger", [[1, 1, 5, 7]], False, False)
+
+    def test_export_batchnorm2D_Higher(self):
+        print("BatchNormalization2DHigher")
+        model = aidge_core.sequential([
+            aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
+        ])
+
+        self.unit_test_export(model, "BatchNorm2DHigher", [[1, 1, 7, 5]], False, False)
+
+    def test_export_batchnorm2D_Denser(self):
+        print("BatchNormalization2DDenser")
+        model = aidge_core.sequential([
+            aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
+        ])
+
+        self.unit_test_export(model, "BatchNorm2DDenser", [[1, 3, 5, 7]], False, False)
+
+    def test_export_batchnorm2D_with_bigger_batch_size(self):
+        print("BatchNormalization2DBiggerBatchSize")
+        model = aidge_core.sequential([
+            aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
+        ])
+
+        self.unit_test_export(model, "BatchNorm2DBiggerBatchSize", [[4, 3, 5, 7]], False, False)
+
 
     def test_export_cpp(self):
         print("Export test to do")