diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 80f8408a01be5a9b1f485251af0b13b8069404c5..ffee8c41a6e5adc13bad1d884e840986e7a868bb 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -103,6 +103,22 @@ class Tensor : public Data,
         resize(dims);
     }
 
+    /**
+     * @brief Construct a new Tensor object from the 1-dimension Vector helper.
+     * @tparam T datatype
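+     *
+     * Minimal usage sketch (requires a registered "cpu" backend implementation of Tensor):
+     * @code
+     * Tensor t = Vector<int>{{1, 2, 3, 4}};  // 1-D tensor holding 4 values
+     * @endcode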
+     */
+    template <typename T>
+    constexpr Tensor(Vector<T> &&arr)
+        : Data(Type),
+          mDataType(NativeType<T>::type),
+          mDims({arr.data.size()}),
+          mStrides({1}),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {arr.data.size()})),
+          mSize(arr.data.size())
+    {
+        mImpl->copyFromHost(&arr.data[0], arr.data.size());
+    }
+
     /**
      * @brief Construct a new Tensor object from the 1-dimension Array helper.
      * @tparam T datatype
@@ -203,6 +219,12 @@ class Tensor : public Data,
      */
     Tensor &operator=(const Tensor& other);
 
+    template <typename T>
+    constexpr Tensor &operator=(Vector<T> &&arr) {
+        *this = Tensor(std::move(arr));
+        return *this;
+    }
+
     template <typename T, std::size_t SIZE_0>
     constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
         *this = Tensor(std::move(arr));
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index 48137610fe74fc8839c2e5dcf6db1df10e29d420..e33abcaebc02e8bcdd002efb7c2d8fe45d883906 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -144,6 +144,13 @@ void expandMetaOps(std::shared_ptr<GraphView> graph, bool recursive = false);
 */
 size_t fuseToMetaOps(std::shared_ptr<GraphView> graph, const std::string& query, const std::string& type = "");
 
+/**
+ * Replace Conv layers with an equivalent Unfold -> Reshape -> MatMul -> Fold micro-graph.
+ * @param graph Graph to manipulate
+ * @return size_t Number of replacements
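+ *
+ * Minimal usage sketch (dims must have been forwarded on the graph beforehand):
+ * @code
+ * auto graph = Sequential({Producer({16, 3, 224, 224}, "dataProvider"), Conv(3, 32, {3, 3}, "conv1")});
+ * graph->forwardDims();
+ * const size_t nbReplaced = convToMatMul(graph);
+ * @endcode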
+*/
+size_t convToMatMul(std::shared_ptr<GraphView> graph);
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_RECIPES_H_ */
diff --git a/include/aidge/utils/ArrayHelpers.hpp b/include/aidge/utils/ArrayHelpers.hpp
index b0db3ca11c10c10a3ce63c3c4809cf7ae09173da..4999ea53a11e0c2784ed4ae40243b18aabcda218 100644
--- a/include/aidge/utils/ArrayHelpers.hpp
+++ b/include/aidge/utils/ArrayHelpers.hpp
@@ -101,6 +101,11 @@ constexpr std::array<T, N + 1> append(T t, std::array<T, N> a) {
 }
 
 // Generic helper for initializing a Tensor
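+// Runtime-sized counterpart of the fixed-size ArrayND helpers below: wraps a
+// std::vector so a 1-D Tensor can be initialized from data whose length is only
+// known at runtime.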
+template <typename T>
+struct Vector {
+    std::vector<T> data;
+};
+
 template <typename T, std::size_t SIZE_0>
 struct Array1D {
     T data[SIZE_0];
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 25f1a01877835be3d89ecb969019dd41dfb2753e..a29713b26c261cc8473eb673a8db9d44fe58d893 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -287,13 +287,13 @@ void Aidge::GraphView::setOrderedInputs(const std::vector<std::pair<NodePtr, IOI
     // it into account.
     if (input.first != nullptr) {
       auto it = std::find(ignoredInputs.begin(), ignoredInputs.end(), input);
-      AIDGE_ASSERT(it != ignoredInputs.end(), "unknown or duplicate input");
+      AIDGE_ASSERT(it != ignoredInputs.end(), "unknown or duplicate input: {} (of type {})", input.first->name(), input.first->type());
       ignoredInputs.erase(it);
       ++nbInputs;
     }
   }
 
-  AIDGE_ASSERT(nbInputs <= mInputNodes.size(), "too many specified number of inputs");
+  AIDGE_ASSERT(nbInputs <= mInputNodes.size(), "too many inputs specified: {} specified vs {} available", nbInputs, mInputNodes.size());
 
   mInputNodes = inputs;
   mInputNodes.insert(mInputNodes.end(), ignoredInputs.begin(), ignoredInputs.end());
@@ -308,13 +308,13 @@ void Aidge::GraphView::setOrderedOutputs(const std::vector<std::pair<NodePtr, IO
     // it into account.
     if (output.first != nullptr) {
       auto it = std::find(ignoredOutputs.begin(), ignoredOutputs.end(), output);
-      AIDGE_ASSERT(it != ignoredOutputs.end(), "unknown or duplicate output");
+      AIDGE_ASSERT(it != ignoredOutputs.end(), "unknown or duplicate output: {} (of type {})", output.first->name(), output.first->type());
       ignoredOutputs.erase(it);
       ++nbOutputs;
     }
   }
 
-  AIDGE_ASSERT(nbOutputs <= mOutputNodes.size(), "too many specified number of outputs");
+  AIDGE_ASSERT(nbOutputs <= mOutputNodes.size(), "too many outputs specified: {} specified vs {} available", nbOutputs, mOutputNodes.size());
 
   mOutputNodes = outputs;
   mOutputNodes.insert(mOutputNodes.end(), ignoredOutputs.begin(), ignoredOutputs.end());
diff --git a/src/recipes/ConvToMatMul.cpp b/src/recipes/ConvToMatMul.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cd441c31855d770211dc72da7d14561be7e12e0c
--- /dev/null
+++ b/src/recipes/ConvToMatMul.cpp
@@ -0,0 +1,95 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <array>
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <numeric>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Matching.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/Unfold.hpp"
+#include "aidge/operator/Fold.hpp"
+#include "aidge/operator/Reshape.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+size_t Aidge::convToMatMul(std::shared_ptr<GraphView> graphView) {
+    const auto matches = SinglePassGraphMatching(graphView).match("Conv");
+
+    size_t nbReplaced = 0;
+    for (const auto& match : matches) {
+        const auto convNode = match.startNode;
+        const std::shared_ptr<Conv_Op<2>> convOp =
+            std::static_pointer_cast<Conv_Op<2>>(convNode->getOperator());
+
+        AIDGE_ASSERT(convOp->getOutput(0) && !convOp->getOutput(0)->empty(),
+            "Output dims must have been forwarded in order to apply convToMatMul for Conv {}", convNode->name());
+
+        const auto nbDims = convOp->getOutput(0)->dims().size();
+        const std::array<DimSize_t, 2> outputDims = {convOp->getOutput(0)->dims()[nbDims - 2], convOp->getOutput(0)->dims()[nbDims - 1]};
+        const auto wShape = convOp->getInput(1)->dims();
+        const auto wFlattenSize = std::accumulate(wShape.cbegin() + 1, wShape.cend(), DimSize_t(1), std::multiplies<DimSize_t>());
+
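+        // im2col-style decomposition: unfold the input into patch columns, reshape
+        // the weight to {outChannels, inChannels * Kh * Kw}, multiply them with a
+        // MatMul, then Fold restores the spatial output shape. The optional bias is
+        // re-added after the Fold.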
+        auto microGraph = std::make_shared<GraphView>();
+        auto unfold = Unfold(convOp->getAttr<std::array<DimSize_t, 2>>("KernelDims"),
+            (!convNode->name().empty()) ? convNode->name() + "_unfold" : "",
+            convOp->getAttr<std::array<DimSize_t, 2>>("StrideDims"),
+            convOp->getAttr<std::array<DimSize_t, 2>>("DilationDims"));
+        auto wReshapeProd = Producer(std::make_shared<Tensor>(Vector<int64_t>{{static_cast<int64_t>(convOp->getInput(1)->dims()[0]), static_cast<int64_t>(wFlattenSize)}}),
+            (!convNode->name().empty()) ? convNode->name() + "_w_reshape_shape_prod" : "",
+            true);
+        auto wReshape = Reshape({},
+            false,
+            (!convNode->name().empty()) ? convNode->name() + "_w_reshape" : "");
+        auto matMul = MatMul((!convNode->name().empty()) ? convNode->name() + "_matmul" : "");
+        auto fold = Fold(outputDims,
+            convOp->getAttr<std::array<DimSize_t, 2>>("KernelDims"),
+            (!convNode->name().empty()) ? convNode->name() + "_unfold" : "",
+            convOp->getAttr<std::array<DimSize_t, 2>>("StrideDims"),
+            convOp->getAttr<std::array<DimSize_t, 2>>("DilationDims"));
+
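+        // Wiring: the shape Producer feeds Reshape's input #1 (target shape); the
+        // Conv weight and data reach Reshape's input #0 and Unfold's input #0 via
+        // the ordered inputs set below.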
+        wReshapeProd->addChild(wReshape, 0, 1);
+        wReshape->addChild(matMul, 0, 0);
+        unfold->addChild(matMul, 0, 1);
+        matMul->addChild(fold, 0, 0);
+        microGraph->add({unfold, wReshapeProd, wReshape, matMul, fold}, false);
+
+        // Handle bias
+        if (convOp->getInput(2) && !convOp->getInput(2)->empty()) {
+            auto add = Add(2, (!convNode->name().empty()) ? convNode->name() + "_add" : "");
+
+            fold->addChild(add, 0, 0);
+            microGraph->add({fold, add}, false);
+            microGraph->setOrderedInputs({{unfold, 0}, {wReshape, 0}, {add, 1}});
+        }
+        else {
+            // Add a dummy 3rd input in order for replace() to work
+            microGraph->setOrderedInputs({{unfold, 0}, {wReshape, 0}, {nullptr, 0}});
+        }
+
+        auto gConv = std::make_shared<GraphView>();
+        gConv->add(convNode, false);
+
+        const auto success = GraphView::replace(gConv, microGraph);
+
+        if (!success) {
+            Log::notice("Could not replace Conv {} with MatMul", convNode->name());
+        }
+        else {
+            ++nbReplaced;
+        }
+    }
+
+    Log::info("Replaced {} (out of {}) matching Conv with MatMul", nbReplaced, matches.size());
+    return nbReplaced;
+}
diff --git a/unit_tests/recipes/Test_ConvToMatMul.cpp b/unit_tests/recipes/Test_ConvToMatMul.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b5ecf7c72804413b620546666c11bc14ad809fbe
--- /dev/null
+++ b/unit_tests/recipes/Test_ConvToMatMul.cpp
@@ -0,0 +1,39 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/recipes/Recipes.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include <cstddef>
+
+using namespace Aidge;
+
+TEST_CASE("[ConvToMatMul] conv") {
+    auto conv1 = Conv(3, 32, {3, 3}, "conv1");
+    auto conv2 = Conv(32, 64, {3, 3}, "conv2", {1, 1}, {1, 1}, true);
+    auto conv3 = Conv(64, 10, {1, 1}, "conv3", {2, 2});
+
+    auto g1 = Sequential({
+        Producer({16, 3, 224, 224}, "dataProvider"),
+        conv1,
+        conv2,
+        conv3
+    });
+
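+    // The recipe requires forwarded dims to compute the Unfold/Fold geometry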
+    g1->forwardDims();
+
+    g1->save("convToMatMul_before");
+    REQUIRE(convToMatMul(g1) == 3);
+    g1->save("convToMatMul_after");
+}