diff --git a/aidge_export_cpp/kernels/convolution.hpp b/aidge_export_cpp/kernels/convolution.hpp
index efc7ee7a192112018d6c582207560d93b4548add..6ea9f0579b84dd5a28a5ea66a778326fcd9c84ce 100644
--- a/aidge_export_cpp/kernels/convolution.hpp
+++ b/aidge_export_cpp/kernels/convolution.hpp
@@ -65,8 +65,8 @@ void convolution_forward(
                 int oOffset = NB_OUTPUTS * oPos;
 
                 // <--
-
-                Bias_T weightedSum = biases[output];
+                // Check if the biases are defined
+                Bias_T weightedSum = biases ? biases[output] : 0;
 
                 for (int sy = 0; sy < KERNEL_HEIGHT; ++sy) {
                     if ((PADDING_Y != 0
@@ -116,4 +116,45 @@ void convolution_forward(
     }
 }
 
+// Overload used when no biases are given to the convolution (a null pointer is passed instead)
+template<int NB_CHANNELS,
+         int CHANNELS_HEIGHT, int CHANNELS_WIDTH,
+         int NB_OUTPUTS,
+         int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
+         int PADDING_Y, int PADDING_X,
+         int STRIDE_Y, int STRIDE_X,
+         int DILATION_Y, int DILATION_X,
+         int KERNEL_HEIGHT, int KERNEL_WIDTH,
+         ActivationFunction_T ACTIVATION,
+         typename Input_T, typename Output_T,
+         typename Weight_T,
+         typename Rescaling_T>
+__attribute__((always_inline)) inline
+void convolution_forward(
+    const Input_T* __restrict inputs,
+    Output_T* __restrict outputs,
+    const Weight_T* __restrict weights,
+    std::nullptr_t, // no __restrict here: the qualifier only applies to pointer and reference types
+    const Rescaling_T& __restrict rescaling)
+{
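+    // A typed null pointer is required so that Bias_T can be deduced (float here);
+    // the generic kernel above then skips the bias term because the pointer is null.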
+    const float* b = nullptr;
+
+    convolution_forward<NB_CHANNELS,
+                        CHANNELS_HEIGHT,
+                        CHANNELS_WIDTH,
+                        NB_OUTPUTS,
+                        OUTPUTS_HEIGHT,
+                        OUTPUTS_WIDTH,
+                        PADDING_Y,
+                        PADDING_X,
+                        STRIDE_Y,
+                        STRIDE_X,
+                        DILATION_Y,
+                        DILATION_X,
+                        KERNEL_HEIGHT,
+                        KERNEL_WIDTH,
+                        ACTIVATION>
+                        (inputs, outputs, weights, b, rescaling);
+}
+
 #endif  // __AIDGE_EXPORT_CPP_KERNELS_CONVOLUTION__
diff --git a/aidge_export_cpp/kernels/reshape.hpp b/aidge_export_cpp/kernels/reshape.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a5828da39553222ef933f0355424e50956ca8490
--- /dev/null
+++ b/aidge_export_cpp/kernels/reshape.hpp
@@ -0,0 +1,27 @@
+#ifndef __AIDGE_EXPORT_CPP_KERNELS_RESHAPE__
+#define __AIDGE_EXPORT_CPP_KERNELS_RESHAPE__
+
+#include "network/typedefs.hpp"
+
+// Generic function for reshape
+
+template<int M,
+         typename Input_T, typename Output_T>
+__attribute__((always_inline)) inline
+void reshape_forward(
+    const Input_T* __restrict, // The first input is unused: it only dictates the resulting layout of the reshape
+    const Input_T* __restrict inputs2,
+    Output_T* __restrict outputs)
+{
+    // If the input and output pointers are the same, the memory manager has already optimized this reshape away, so there is nothing to do.
+    if (inputs2 == outputs)
+        return;
+
+    // In C++, a reshape amounts to a no-op on the data:
+    // we only need to copy the input buffer to the output.
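+    // NOTE: a plain element-wise copy is used here; when Input_T and Output_T are the
+    // same trivially copyable type, std::memcpy would be an equivalent alternative.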
+    for (int m = 0; m < M; ++m) {
+        outputs[m] = inputs2[m];
+    }
+}
+
+#endif  // __AIDGE_EXPORT_CPP_KERNELS_RESHAPE__
diff --git a/aidge_export_cpp/operators.py b/aidge_export_cpp/operators.py
index 54c38055b2c622aa1796bc1e7ff2dd46e60afcba..346928f4a84c403df2172311cede8b99fd06eebe 100644
--- a/aidge_export_cpp/operators.py
+++ b/aidge_export_cpp/operators.py
@@ -94,27 +94,65 @@ class ReLUCPP(ExportNodeCpp):
             str(ROOT / "kernels" / "rescaling.hpp")
         ]
 
-@ExportLibCpp.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
-class ConvCPP(ExportNodeCpp):
+@ExportLibCpp.register("Reshape", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class ReshapeCPP(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        self.config_template = str(
+            ROOT / "templates" / "configuration" / "reshape_config.jinja")
+        self.forward_template = str(
+            ROOT / "templates" / "kernel_forward" / "reshape_forward.jinja")
+        self.include_list = []
+        self.kernels_to_copy = [
+            str(ROOT / "kernels" / "reshape.hpp"),
+        ]
+
+@ExportLibCpp.register("MatMul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class MatMulCPP(ExportNodeCpp):
     def __init__(self, node, mem_info):
         super().__init__(node, mem_info)
-        # No padding with Conv
-        # Use PaddedConv to add padding attribute
-        self.attributes["padding"] = [0, 0]
         self.attributes["activation"] = "Linear"
         self.attributes["rescaling"] = "NoScaling"
         self.config_template = str(
-            ROOT / "templates" / "configuration" / "convolution_config.jinja")
+            ROOT / "templates" / "configuration" / "matmul_config.jinja")
         self.forward_template = str(
-            ROOT / "templates" / "kernel_forward" / "convolution_forward.jinja")
+            ROOT / "templates" / "kernel_forward" / "matmul_forward.jinja")
         self.include_list = []
         self.kernels_to_copy = [
-            str(ROOT / "kernels" / "convolution.hpp"),
-            str(ROOT / "kernels" / "macs.hpp"),
-            str(ROOT / "kernels" / "activation.hpp"),
-            str(ROOT / "kernels" / "rescaling.hpp")
+            str(ROOT / "kernels" / "matmul.hpp"),
         ]
 
+def _setup_conv2D(conv):
+    """Common setup code for convolutions: Conv2D and PaddedConv2D."""
+
+    # If biases are not provided, set the bias input name to "nullptr" instead of None
+    if (len(conv.attributes["in_name"]) > 2 and conv.attributes["in_name"][2] is None):
+        conv.attributes["in_name"][2] = "nullptr"
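+        # The generated forward call then passes the literal "nullptr", which dispatches
+        # to the std::nullptr_t overload of convolution_forward (see convolution.hpp).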
+
+    conv.attributes["activation"] = "Linear"
+    conv.attributes["rescaling"] = "NoScaling"
+    conv.config_template = str(
+        ROOT / "templates" / "configuration" / "convolution_config.jinja")
+    conv.forward_template = str(
+        ROOT / "templates" / "kernel_forward" / "convolution_forward.jinja")
+    conv.include_list = []
+    conv.kernels_to_copy = [
+        str(ROOT / "kernels" / "convolution.hpp"),
+        str(ROOT / "kernels" / "macs.hpp"),
+        str(ROOT / "kernels" / "activation.hpp"),
+        str(ROOT / "kernels" / "rescaling.hpp")
+    ]
+
+@ExportLibCpp.register("Conv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
+class ConvCPP(ExportNodeCpp):
+    def __init__(self, node, mem_info):
+        super().__init__(node, mem_info)
+        # No padding with Conv
+        # Use PaddedConv to add padding attribute
+        self.attributes["padding"] = [0, 0]
+
+        _setup_conv2D(self)
+
 @ExportLibCpp.register_metaop("PaddedConv2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class PaddedConvCPP(ExportNodeCpp):
     def __init__(self, node, mem_info):
@@ -131,74 +169,60 @@ class PaddedConvCPP(ExportNodeCpp):
                 ).attr.stride_dims
                 self.attributes["dilation_dims"] = n.get_operator(
                 ).attr.dilation_dims
-        self.attributes["activation"] = "Linear"
-        self.attributes["rescaling"] = "NoScaling"
-        self.config_template = str(
-            ROOT / "templates" / "configuration" / "convolution_config.jinja")
-        self.forward_template = str(
-            ROOT / "templates" / "kernel_forward" / "convolution_forward.jinja")
-        self.include_list = []
-        self.kernels_to_copy = [
-            str(ROOT / "kernels" / "convolution.hpp"),
-            str(ROOT / "kernels" / "macs.hpp"),
-            str(ROOT / "kernels" / "activation.hpp"),
-            str(ROOT / "kernels" / "rescaling.hpp")
-        ]
+
+        _setup_conv2D(self)
+
+def _setup_elemwise_op(elemwise, op):
+    """Common code (template and kernel setup) shared across all the different elementWise operator (Add, Sub,...)."""
+
+    elemwise.attributes["elemwise_op"] = op
+    elemwise.attributes["activation"] = "Linear"
+    elemwise.attributes["rescaling"] = "NoScaling"
+    elemwise.config_template = str(
+        ROOT / "templates" / "configuration" / "elemwise_config.jinja")
+    elemwise.forward_template = str(
+        ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja")
+    elemwise.include_list = []
+    elemwise.kernels_to_copy = [
+        str(ROOT / "kernels" / "elemwise.hpp"),
+        str(ROOT / "kernels" / "activation.hpp"),
+        str(ROOT / "kernels" / "rescaling.hpp")
+    ]
 
 @ExportLibCpp.register("Add", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class AddCPP(ExportNodeCpp):
     def __init__(self, node, mem_info):
         super().__init__(node, mem_info)
-        self.attributes["elemwise_op"] = "Add"
-        self.attributes["activation"] = "Linear"
-        self.attributes["rescaling"] = "NoScaling"
-        self.config_template = str(
-            ROOT / "templates" / "configuration" / "elemwise_config.jinja")
-        self.forward_template = str(
-            ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja")
-        self.include_list = []
-        self.kernels_to_copy = [
-            str(ROOT / "kernels" / "elemwise.hpp"),
-            str(ROOT / "kernels" / "activation.hpp"),
-            str(ROOT / "kernels" / "rescaling.hpp")
-        ]
+
+        _setup_elemwise_op(self, "Add")
 
 @ExportLibCpp.register("Sub", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class SubCPP(ExportNodeCpp):
     def __init__(self, node, mem_info):
         super().__init__(node, mem_info)
-        self.attributes["elemwise_op"] = "Sub"
-        self.attributes["activation"] = "Linear"
-        self.attributes["rescaling"] = "NoScaling"
-        self.config_template = str(
-            ROOT / "templates" / "configuration" / "elemwise_config.jinja")
-        self.forward_template = str(
-            ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja")
-        self.include_list = []
-        self.kernels_to_copy = [
-            str(ROOT / "kernels" / "elemwise.hpp"),
-            str(ROOT / "kernels" / "activation.hpp"),
-            str(ROOT / "kernels" / "rescaling.hpp")
-        ]
 
+        _setup_elemwise_op(self, "Sub")
 
 @ExportLibCpp.register("Mul", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class MulCPP(ExportNodeCpp):
     def __init__(self, node, mem_info):
         super().__init__(node, mem_info)
-        self.attributes["elemwise_op"] = "Mul"
-        self.attributes["activation"] = "Linear"
-        self.attributes["rescaling"] = "NoScaling"
-        self.config_template = str(
-            ROOT / "templates" / "configuration" / "elemwise_config.jinja")
-        self.forward_template = str(
-            ROOT / "templates" / "kernel_forward" / "elemwise_forward.jinja")
-        self.include_list = []
-        self.kernels_to_copy = [
-            str(ROOT / "kernels" / "elemwise.hpp"),
-            str(ROOT / "kernels" / "activation.hpp"),
-            str(ROOT / "kernels" / "rescaling.hpp")
-        ]
+
+        _setup_elemwise_op(self, "Mul")
+
+def _setup_pooling(pooling):
+    """Common code (template and kernel setup) shared across all the different pooling operator."""
+
+    pooling.config_template = str(
+        ROOT / "templates" / "configuration" / "pooling_config.jinja")
+    pooling.forward_template = str(
+        ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja")
+    pooling.include_list = []
+    pooling.kernels_to_copy = [
+        str(ROOT / "kernels" / "pooling.hpp"),
+        str(ROOT / "kernels" / "activation.hpp"),
+        str(ROOT / "kernels" / "rescaling.hpp")
+    ]
 
 @ExportLibCpp.register("MaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class MaxPoolCPP(ExportNodeCpp):
@@ -211,17 +235,7 @@ class MaxPoolCPP(ExportNodeCpp):
         self.attributes["pool_type"] = "Max"
         self.attributes["activation"] = "Linear"
 
-        self.config_template = str(
-            ROOT / "templates" / "configuration" / "pooling_config.jinja")
-        self.forward_template = str(
-            ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja")
-        self.include_list = []
-        self.kernels_to_copy = [
-            str(ROOT / "kernels" / "pooling.hpp"),
-            str(ROOT / "kernels" / "activation.hpp"),
-            str(ROOT / "kernels" / "rescaling.hpp")
-        ]
-
+        _setup_pooling(self)
 
 @ExportLibCpp.register_metaop("PaddedMaxPooling2D", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class PaddedMaxPoolCPP(ExportNodeCpp):
@@ -239,17 +253,7 @@ class PaddedMaxPoolCPP(ExportNodeCpp):
         self.attributes["pool_type"] = "Max"
         self.attributes["activation"] = "Linear"
 
-        self.config_template = str(
-            ROOT / "templates" / "configuration" / "pooling_config.jinja")
-        self.forward_template = str(
-            ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja")
-        self.include_list = []
-        self.kernels_to_copy = [
-            str(ROOT / "kernels" / "pooling.hpp"),
-            str(ROOT / "kernels" / "activation.hpp"),
-            str(ROOT / "kernels" / "rescaling.hpp")
-        ]
-
+        _setup_pooling(self)
 
 @ExportLibCpp.register("GlobalAveragePooling", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class GlobalAveragePoolCPP(ExportNodeCpp):
@@ -267,16 +271,7 @@ class GlobalAveragePoolCPP(ExportNodeCpp):
         self.attributes["pool_type"] = "Average"
         self.attributes["activation"] = "Linear"
 
-        self.config_template = str(
-            ROOT / "templates" / "configuration" / "pooling_config.jinja")
-        self.forward_template = str(
-            ROOT / "templates" / "kernel_forward" / "pooling_forward.jinja")
-        self.include_list = []
-        self.kernels_to_copy = [
-            str(ROOT / "kernels" / "pooling.hpp"),
-            str(ROOT / "kernels" / "activation.hpp"),
-            str(ROOT / "kernels" / "rescaling.hpp")
-        ]
+        _setup_pooling(self)
 
 @ExportLibCpp.register("FC", aidge_core.ImplSpec(aidge_core.IOSpec(aidge_core.dtype.float32)))
 class FcCPP(ExportNodeCpp):
diff --git a/aidge_export_cpp/templates/configuration/matmul_config.jinja b/aidge_export_cpp/templates/configuration/matmul_config.jinja
index fece988ac13b0136a8506abb39998114923817d6..38316f20947fa726085bf3577ead510e6c5096f3 100644
--- a/aidge_export_cpp/templates/configuration/matmul_config.jinja
+++ b/aidge_export_cpp/templates/configuration/matmul_config.jinja
@@ -2,10 +2,13 @@
 #ifndef {{ name|upper }}_LAYER_H
 #define {{ name|upper }}_LAYER_H
 
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
+
 {# For layer configuration -#}
-#define {{ name|upper }}_M {{ inputs_dims[0][0] }}
-#define {{ name|upper }}_K {{ inputs_dims[0][1] }}
-#define {{ name|upper }}_N {{ inputs_dims[1][1] }}
+#define {{ name|upper }}_M {{ in_dims[0][0] }}
+#define {{ name|upper }}_K {{ in_dims[0][1] }}
+#define {{ name|upper }}_N {{ in_dims[1][1] }}
 #define {{ name|upper }}_ACTIVATION {{ activation }}
 static const {{ rescaling }} {{ name|upper }}_RESCALING = {};
 
diff --git a/aidge_export_cpp/templates/configuration/reshape_config.jinja b/aidge_export_cpp/templates/configuration/reshape_config.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..041cf8ae898ed561e050354d137c1c676723a81f
--- /dev/null
+++ b/aidge_export_cpp/templates/configuration/reshape_config.jinja
@@ -0,0 +1,8 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+{% include "./_def_io.jinja" %}
+{% include "./_meminfo.jinja" %}
+{# For layer configuration -#}
+#define {{ name|upper }}_NB_ELTS {{ in_dims[0]|join('*') }}
diff --git a/aidge_export_cpp/templates/kernel_forward/matmul_forward.jinja b/aidge_export_cpp/templates/kernel_forward/matmul_forward.jinja
index ce80ffd2abc90ad611d3008c57aae36383691452..64b3df301794e1cb3d56170646a6b9524f18a6ab 100644
--- a/aidge_export_cpp/templates/kernel_forward/matmul_forward.jinja
+++ b/aidge_export_cpp/templates/kernel_forward/matmul_forward.jinja
@@ -1,5 +1,9 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
 matmul_forward<{{name|upper}}_M,
                {{name|upper}}_K,
                {{name|upper}}_N,
                {{name|upper}}_ACTIVATION>
-               ({{inputs1_name}}, {{inputs2_name}}, {{outputs_name}}, {{name|upper}}_RESCALING);
\ No newline at end of file
+               ({{in_name[0]}}, {{in_name[1]}}, {{out_name[0]}}, {{name|upper}}_RESCALING);
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}
diff --git a/aidge_export_cpp/templates/kernel_forward/reshape_forward.jinja b/aidge_export_cpp/templates/kernel_forward/reshape_forward.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..f9752bcc85255ba321082fbf5cf599f45b3ab4c4
--- /dev/null
+++ b/aidge_export_cpp/templates/kernel_forward/reshape_forward.jinja
@@ -0,0 +1,6 @@
+{% filter indent(width=4, first=False) %}
+{% include "./_mem_offset.jinja" %}
+reshape_forward<{{name|upper}}_NB_ELTS>
+                 ({{in_name[0]}}, {{in_name[1]}}, {{out_name[0]}});
+{% include "./_save_outputs.jinja" %}
+{% endfilter %}