diff --git a/aidge_export_cpp/kernels/matmul.hpp b/aidge_export_cpp/kernels/matmul.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..4500993e02cf42fb698bc9004462800bdd3f7dc4
--- /dev/null
+++ b/aidge_export_cpp/kernels/matmul.hpp
@@ -0,0 +1,33 @@
+#ifndef __AIDGE_EXPORT_CPP_KERNELS_MATMUL__
+#define __AIDGE_EXPORT_CPP_KERNELS_MATMUL__
+
+#include "network/typedefs.hpp"
+#include "kernels/activation.hpp"
+
+// Generic function for matmul and activation
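+// inputs1: row-major M x K matrix, inputs2: row-major K x N matrix,
+// outputs: row-major M x N matrix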
+
+template<int M,
+         int K,
+         int N,
+         ActivationFunction_T ACTIVATION,
+         typename Input_T, typename Output_T,
+         typename Rescaling_T>
+__attribute__((always_inline)) inline
+void matmul_forward (
+    const Input_T* __restrict inputs1,
+    const Input_T* __restrict inputs2,
+    Output_T* __restrict outputs,
+    const Rescaling_T& __restrict rescaling)
+{
+    for (int m = 0; m < M; ++m) {
+        for (int n = 0; n < N; ++n) {
+            Output_T sum = Output_T(0);
+            for (int k = 0; k < K; ++k) {
+                sum += inputs1[K*m + k] * inputs2[N*k + n];
+            }
+            outputs[N*m + n] = activation_forward_value<Output_T>(sum, 0/*not applicable*/, ACTIVATION, rescaling);
+        }
+    }
+}
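+
+// Example instantiation (hypothetical layer name "MYLAYER"), as generated by
+// templates/kernel_forward/matmul_forward.jinja:
+//   matmul_forward<MYLAYER_M, MYLAYER_K, MYLAYER_N, MYLAYER_ACTIVATION>
+//       (inputs1, inputs2, outputs, MYLAYER_RESCALING);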
+
+#endif  // __AIDGE_EXPORT_CPP_KERNELS_MATMUL__
diff --git a/aidge_export_cpp/operators.py b/aidge_export_cpp/operators.py
index 0cfb0797930458e666bfef8940f62fdf3bb6da32..602aea8dcee699ced05863ecc627e94193e9e653 100644
--- a/aidge_export_cpp/operators.py
+++ b/aidge_export_cpp/operators.py
@@ -453,3 +453,68 @@ class FcCPP(ExportNode):
         ))
         return list_actions
 
+@operator_register("MatMul")
+class MatMulCPP(ExportNode):
+    def __init__(self, node):
+        super().__init__(node)
+
+        dims0, dims1, outdims = [tuple(x) for x in [self.inputs_dims[0], self.inputs_dims[1], self.outputs_dims[0]]]
+
+        # TODO: the aidge MatMul operator supports N-D inputs (N > 2) with broadcasting
+        assert len(dims0) <= 2 and len(dims1) <= 2, (
+            f"MatMul export does not yet support dimensions above 2D: input shapes are {dims0}, {dims1}")
+
+        # Promote scalar inputs directly to (1, 1)
+        # Note that MatMul::forwardDims() supports scalar inputs,
+        # which is more general than np.matmul
+        dims0 = dims0 if len(dims0) >= 1 else (1, 1)
+        dims1 = dims1 if len(dims1) >= 1 else (1, 1)
+
+        # Promote 1-D inputs to 2-D: prepend 1 to the first input, append 1 to the second
+        dims0 = dims0 if len(dims0) >= 2 else (1, dims0[0])
+        dims1 = dims1 if len(dims1) >= 2 else (dims1[0], 1)
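+        # e.g. a 1-D first input (3,) against a 2-D second input (3, 4)
+        # becomes (1, 3) x (3, 4), following np.matmul's promotion rule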
+        assert dims0[1] == dims1[0], (
+            f"MatMul input dimensions do no match, expected (m, k), (k, n): inputs shapes are: {dims0}, {dims1}")
+
+        outdims = outdims if len(outdims) > 0 else (1, 1)
+        assert outdims == (dims0[0], dims1[1]), (
+            f"MatMul output dimensions do no match, expected (m, n) for inputs (m, k) (k, n): output shape is: {outdims}, inputs shapes are: {dims0}, {dims1}")
+
+        self.matmul_inputs_dims = dims0, dims1
+        self.matmul_output_dims = outdims
+
+    def export(self, export_folder:Path, list_configs:list):
+
+        copyfile(str(ROOT / "kernels" / "matmul.hpp"),
+                 str(export_folder / "include" / "kernels"))
+        copyfile(str(ROOT / "kernels" / "activation.hpp"),
+                 str(export_folder / "include" / "kernels"))
+
+        # Add the kernel and configuration includes to the config list
+        list_configs.append("kernels/matmul.hpp")
+        list_configs.append(f"layers/{self.name}.h")
+
+        # Export configuration file
+        generate_file(
+            str(export_folder / "layers" / f"{self.name}.h"),
+            str(ROOT / "templates" / "configuration" / "matmul_config.jinja"),
+            name=self.name,
+            inputs_dims=self.matmul_inputs_dims,
+            output_dims=self.matmul_output_dims,
+            activation="Linear",
+            rescaling="NoScaling",
+        )
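+        # The generated layers/<name>.h header defines <NAME>_M, <NAME>_K, <NAME>_N,
+        # <NAME>_ACTIVATION and <NAME>_RESCALING (see matmul_config.jinja),
+        # which matmul_forward.jinja consumes.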
+
+        return list_configs
+
+    def forward(self, list_actions:list):
+        if not self.is_last:
+            list_actions.append(set_up_output(self.name, "float"))
+        list_actions.append(generate_str(
+            str(ROOT / "templates" / "kernel_forward" / "matmul_forward.jinja"),
+            name=self.name,
+            inputs1_name=self.inputs[0].name() if self.inputs[0] else self.name + "_input1",
+            inputs2_name=self.inputs[1].name() if self.inputs[1] else self.name + "_input2",
+            outputs_name=self.name
+        ))
+        return list_actions
diff --git a/aidge_export_cpp/templates/configuration/matmul_config.jinja b/aidge_export_cpp/templates/configuration/matmul_config.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..fece988ac13b0136a8506abb39998114923817d6
--- /dev/null
+++ b/aidge_export_cpp/templates/configuration/matmul_config.jinja
@@ -0,0 +1,15 @@
+{#- For name header -#}
+#ifndef {{ name|upper }}_LAYER_H
+#define {{ name|upper }}_LAYER_H
+
+{# For layer configuration -#}
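+{#- M, K and N describe the (M x K) * (K x N) -> (M x N) product computed by matmul.hpp -#}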
+#define {{ name|upper }}_M {{ inputs_dims[0][0] }}
+#define {{ name|upper }}_K {{ inputs_dims[0][1] }}
+#define {{ name|upper }}_N {{ inputs_dims[1][1] }}
+#define {{ name|upper }}_ACTIVATION {{ activation }}
+static const {{ rescaling }} {{ name|upper }}_RESCALING = {};
+
+{#- Calculate sizes #}
+
+
+#endif /* {{ name|upper }}_LAYER_H */
diff --git a/aidge_export_cpp/templates/kernel_forward/matmul_forward.jinja b/aidge_export_cpp/templates/kernel_forward/matmul_forward.jinja
new file mode 100644
index 0000000000000000000000000000000000000000..ce80ffd2abc90ad611d3008c57aae36383691452
--- /dev/null
+++ b/aidge_export_cpp/templates/kernel_forward/matmul_forward.jinja
@@ -0,0 +1,5 @@
+matmul_forward<{{name|upper}}_M,
+               {{name|upper}}_K,
+               {{name|upper}}_N,
+               {{name|upper}}_ACTIVATION>
+               ({{inputs1_name}}, {{inputs2_name}}, {{outputs_name}}, {{name|upper}}_RESCALING);
\ No newline at end of file