diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp
index 95b2f7b8e2ff70c9b9224bea1137ad74e469ffb8..764a8e4b0f9b31882be60659298d57687a771341 100644
--- a/include/aidge/backend/cpu.hpp
+++ b/include/aidge/backend/cpu.hpp
@@ -20,6 +20,7 @@
 #include "aidge/backend/cpu/operator/ConvImpl.hpp"
 #include "aidge/backend/cpu/operator/FCImpl.hpp"
 #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
+#include "aidge/backend/cpu/operator/MatMulImpl.hpp"
 #include "aidge/backend/cpu/operator/ProducerImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
diff --git a/include/aidge/backend/cpu/operator/MatMulImpl.hpp b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..437d0228706f6d8f0972dab6f6aef1830f938883
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
@@ -0,0 +1,76 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_MATMULIMPL_H_
+#define AIDGE_CPU_OPERATOR_MATMULIMPL_H_
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+// compute kernel registry for forward and backward
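+// Kernels are registered against the (input, weight, output) DataType tuple and receive the
+// MatMul parameters, the batch size, the flattened per-batch input size and raw data pointers.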
+class MatMulImplForward_cpu
+    : public Registrable<MatMulImplForward_cpu, std::tuple<DataType, DataType, DataType>,
+                         void(const MatMul_Op::Parameters &, const DimSize_t, const DimSize_t,
+                              const void *, const void *, void *)> {};
+class MatMulImplBackward_cpu
+    : public Registrable<MatMulImplBackward_cpu, std::tuple<DataType, DataType, DataType>,
+                         void(const MatMul_Op::Parameters &, const DimSize_t, const DimSize_t,
+                              const void *, const void *, void *)> {};
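+// Example kernel lookup (as performed in MatMulImpl_cpu::forward()):
+//   auto kernelFunc = Registrar<MatMulImplForward_cpu>::create(
+//       {DataType::Float32, DataType::Float32, DataType::Float32});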
+
+class MatMulImpl_cpu : public OperatorImpl {
+private:
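+    // Reference to the operator being implemented, plus producer/consumer bookkeeping
+    // (two inputs: data and weight; one output)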
+    const MatMul_Op &mOp;
+    std::array<NbElts_t, 2> mNbConsumedData;
+    std::array<NbElts_t, 1> mNbProducedData;
+
+public:
+    MatMulImpl_cpu(const MatMul_Op &op)
+        : mOp(op),
+          mNbConsumedData({0, 0}),
+          mNbProducedData({0})
+        {
+            // ctor
+        }
+
+    static std::unique_ptr<MatMulImpl_cpu> create(const MatMul_Op &op)
+    {
+        return std::make_unique<MatMulImpl_cpu>(op);
+    }
+
+public:
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/,
+                               const std::vector<DimSize_t> & /*inputsSize*/) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+
+    void updateConsummerProducer() override final;
+
+    void forward();
+    void backward();
+};
+
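+// Register this implementation as the "cpu" backend for MatMul_Op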
+namespace {
+static Registrar<MatMul_Op> registrarMatMulImpl_cpu("cpu", Aidge::MatMulImpl_cpu::create);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_MATMULIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..2ef7e4337d2d8eb8380bfbec02ad525f211b0715
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp
@@ -0,0 +1,58 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_MATMULIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_MATMULIMPL_FORWARD_KERNEL_H_
+
+#include <algorithm>  // std::fill
+#include <cstddef>    // std::size_t
+#include <numeric>    // std::inner_product
+
+#include "aidge/backend/cpu/operator/MatMulImpl.hpp"
+#include "aidge/utils/Registrar.hpp"
+
+namespace Aidge {
+
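+// Forward kernel: for each batch b and output o,
+//   output[b][o] = dot(input[b][:], weights[o][:]) over oneInputSize elements,
+// where std::get<0>(params) is the operator's number of outputs.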
+template <class I, class W, class O>
+void MatMulImpl_cpu_forward_kernel(const MatMul_Op::Parameters& params, const DimSize_t batchSize, const DimSize_t oneInputSize,
+                                   const void* input_, const void* weights_, void* output_) {
+    const I* input = static_cast<const I*>(input_);
+    const W* weights = static_cast<const W*>(weights_);
+    O* output = static_cast<O*>(output_);
+
+    // Initialize the (batchSize x nbOutputs) output to zero, where nbOutputs = std::get<0>(params)
+    std::fill(output, output + (batchSize * std::get<0>(params)), O(0));
+
+    for (std::size_t batch = 0; batch < batchSize; ++batch) {
+        for (std::size_t out = 0; out < std::get<0>(params); ++out) {
+            output[out + batch*std::get<0>(params)] = std::inner_product(input + batch*oneInputSize,
+                                                        input + (batch + 1)*oneInputSize,
+                                                        weights + out*oneInputSize,
+                                                        output[out + batch*std::get<0>(params)]);
+        }
+    }
+}
+
+
+namespace {
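+// Kernels registered for homogeneous float32, int32 and float64 (input, weight, output) types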
+static Registrar<MatMulImplForward_cpu> registrarMatMulImpl2DForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32, DataType::Float32},
+        Aidge::MatMulImpl_cpu_forward_kernel<float, float, float>);
+static Registrar<MatMulImplForward_cpu> registrarMatMulImpl2DForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32, DataType::Int32},
+        Aidge::MatMulImpl_cpu_forward_kernel<int, int, int>);
+static Registrar<MatMulImplForward_cpu> registrarMatMulImpl2DForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64, DataType::Float64},
+        Aidge::MatMulImpl_cpu_forward_kernel<double, double, double>);
+}  // namespace
+
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_MATMULIMPL_FORWARD_KERNEL_H_ */
diff --git a/src/operator/MatMulImpl.cpp b/src/operator/MatMulImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..47f11dca7605ddee71e47dc6b2ae87cff6efba4b
--- /dev/null
+++ b/src/operator/MatMulImpl.cpp
@@ -0,0 +1,121 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstdio>  // printf
+#include <numeric> // std::accumulate
+#include <vector>
+
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/utils/Types.h"
+
+#include "aidge/backend/cpu/operator/MatMulImpl.hpp"
+#include "aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp"
+
+Aidge::NbElts_t Aidge::MatMulImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const
+{
+    assert(mOp.getInput(inputIdx) && "requires valid input");
+
+    // Requires the whole tensors
+    const auto &inputDims
+        = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
+
+    return std::accumulate(
+        inputDims.begin(),
+        inputDims.end(),
+        Aidge::NbElts_t(1),
+        std::multiplies<Aidge::NbElts_t>());
+}
+
+Aidge::NbElts_t
+    Aidge::MatMulImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const
+{
+    // No input data needs to be protected from overwriting for this operator
+    return 0;
+}
+
+Aidge::NbElts_t Aidge::MatMulImpl_cpu::getRequiredMemory(
+    const IOIndex_t outputIdx, const std::vector<DimSize_t> &/*inputsSize*/) const
+{
+    // Requires the whole tensors, regardless of available data on inputs
+    assert(outputIdx == 0 && "operator has only one output");
+    (void) outputIdx;
+
+    const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
+    return std::accumulate(
+        outputDims.begin(),
+        outputDims.end(),
+        static_cast<NbElts_t>(1),
+        std::multiplies<NbElts_t>());
+}
+
+Aidge::NbElts_t Aidge::MatMulImpl_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const
+{
+    assert((inputIdx != gk_IODefaultIndex) && (inputIdx < mNbConsumedData.size()));
+    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
+}
+
+Aidge::NbElts_t Aidge::MatMulImpl_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const
+{
+    assert(static_cast<std::size_t>(outputIdx) < mNbProducedData.size());
+    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
+}
+
+void Aidge::MatMulImpl_cpu::updateConsummerProducer()
+{
+    // Update producer-consumer data:
+    // each input is consumed by the minimum amount required for a forward pass
+    for (IOIndex_t inputIdx = 0; static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
+        mNbConsumedData[inputIdx] += getNbRequiredData(inputIdx);
+
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
+
+void Aidge::MatMulImpl_cpu::forward()
+{
+    assert(mOp.getInput(0) && "missing input #0");
+    assert(mOp.getInput(1) && "missing input #1");
+
+    // Find the correct kernel type from the (input, weight, output) datatypes
+    auto kernelFunc = Registrar<MatMulImplForward_cpu>::create(
+        {mOp.getInput(0)->dataType(),
+         mOp.getInput(1)->dataType(),
+         mOp.getOutput(0)->dataType()});
+
+    // Call kernel: the input is viewed as a 2-D (batch, features) matrix,
+    // with dims()[0] as the batch size and sizeM1() as the flattened per-batch feature count
+    kernelFunc(
+        mOp.getParams(),
+        mOp.getInput(0)->dims()[0],
+        mOp.getInput(0)->sizeM1(),
+        mOp.getInput(0)->getImpl()->rawPtr(),
+        mOp.getInput(1)->getImpl()->rawPtr(),
+        mOp.getOutput(0)->getImpl()->rawPtr());
+}
+
+void Aidge::MatMulImpl_cpu::backward()
+{
+    printf("Not implemented yet.\n");
+}
diff --git a/unit_tests/operator/Test_MatMulImpl.cpp b/unit_tests/operator/Test_MatMulImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0da01b3287043e07e5b967df8882960cfb814f8f
--- /dev/null
+++ b/unit_tests/operator/Test_MatMulImpl.cpp
@@ -0,0 +1,108 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/MatMul.hpp"
+
+#include "aidge/backend/cpu/operator/MatMulImpl.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul]") {
+    // Test MatMul forward with batch size = 2 and feature size = 75
+    std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array2D<int, 5, 75>{
+            {{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
+              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
+              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
+             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
+              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
+              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
+             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
+              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
+              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
+             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
+              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
+              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15},
+             {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
+              5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
+              9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
+              13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15}}});
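+    // Expected values: each output is the dot product of a 75-element input row with a weight row.
+    // Batch 0 (inputs 0..74) gives 23600; batch 1 (inputs 75..149) adds 75 * sum(weight row) = 45000, giving 68600.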
+    std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<int, 2, 5>{
+            {{23600, 23600, 23600, 23600, 23600}, {68600, 68600, 68600, 68600, 68600}}});
+
+    std::shared_ptr<Node> myMatMul = MatMul(5, "mymatmul");
+    myMatMul->getOperator()->setDatatype(DataType::Int32);
+    myMatMul->getOperator()->setBackend("cpu");
+    myMatMul->getOperator()->associateInput(1, myWeights);
+
+    SECTION("2D input") {
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<int, 2, 75>{
+                {{0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18,
+                  19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+                  38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+                  57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74},
+                 {75,  76,  77,  78,  79,  80,  81,  82,  83,  84,  85,  86,  87,  88,  89,
+                  90,  91,  92,  93,  94,  95,  96,  97,  98,  99,  100, 101, 102, 103, 104,
+                  105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
+                  120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+                  135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149}}});
+        myMatMul->getOperator()->associateInput(0, myInput);
+        myMatMul->getOperator()->computeOutputDims();
+        myMatMul->forward();
+        REQUIRE(*std::static_pointer_cast<Tensor>(myMatMul->getOperator()->getOutput(0)) == *myOutput);
+    }
+    SECTION("4D input") {
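+        // Same 150 values as the 2-D case: the implementation flattens the 2x3x5x5 input to
+        // (batch=2, features=75), so the expected output is identical.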
+        std::shared_ptr<Tensor> myInput =
+                std::make_shared<Tensor>(Array4D<int, 2, 3, 5, 5>{{{{{0, 1, 2, 3, 4},
+                                                                     {5, 6, 7, 8, 9},
+                                                                     {10, 11, 12, 13, 14},
+                                                                     {15, 16, 17, 18, 19},
+                                                                     {20, 21, 22, 23, 24}},
+                                                                    {{25, 26, 27, 28, 29},
+                                                                     {30, 31, 32, 33, 34},
+                                                                     {35, 36, 37, 38, 39},
+                                                                     {40, 41, 42, 43, 44},
+                                                                     {45, 46, 47, 48, 49}},
+                                                                    {{50, 51, 52, 53, 54},
+                                                                     {55, 56, 57, 58, 59},
+                                                                     {60, 61, 62, 63, 64},
+                                                                     {65, 66, 67, 68, 69},
+                                                                     {70, 71, 72, 73, 74}}},
+                                                                   {{{75, 76, 77, 78, 79},
+                                                                     {80, 81, 82, 83, 84},
+                                                                     {85, 86, 87, 88, 89},
+                                                                     {90, 91, 92, 93, 94},
+                                                                     {95, 96, 97, 98, 99}},
+                                                                    {{100, 101, 102, 103, 104},
+                                                                     {105, 106, 107, 108, 109},
+                                                                     {110, 111, 112, 113, 114},
+                                                                     {115, 116, 117, 118, 119},
+                                                                     {120, 121, 122, 123, 124}},
+                                                                    {{125, 126, 127, 128, 129},
+                                                                     {130, 131, 132, 133, 134},
+                                                                     {135, 136, 137, 138, 139},
+                                                                     {140, 141, 142, 143, 144},
+                                                                     {145, 146, 147, 148, 149}}}}});
+        myMatMul->getOperator()->associateInput(0, myInput);
+        myMatMul->getOperator()->computeOutputDims();
+        myMatMul->forward();
+        REQUIRE(*std::static_pointer_cast<Tensor>(myMatMul->getOperator()->getOutput(0)) == *myOutput);
+    }
+
+}