diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 80f8408a01be5a9b1f485251af0b13b8069404c5..ffee8c41a6e5adc13bad1d884e840986e7a868bb 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -103,6 +103,22 @@ class Tensor : public Data,
         resize(dims);
     }
 
+    /**
+     * @brief Construct a new Tensor object from the 1-dimension Vector helper.
+     * @tparam T datatype
+     */
+    template <typename T>
+    constexpr Tensor(Vector<T> &&arr)
+        : Data(Type),
+          mDataType(NativeType<T>::type),
+          mDims({arr.data.size()}),
+          mStrides({1}),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {arr.data.size()})),
+          mSize(arr.data.size())
+    {
+        mImpl->copyFromHost(&arr.data[0], arr.data.size());
+    }
+
     /**
      * @brief Construct a new Tensor object from the 1-dimension Array helper.
      * @tparam T datatype
@@ -203,6 +219,12 @@ class Tensor : public Data,
      */
     Tensor &operator=(const Tensor& other);
 
+    template <typename T>
+    constexpr Tensor &operator=(Vector<T> &&arr) {
+        *this = Tensor(std::move(arr));
+        return *this;
+    }
+
     template <typename T, std::size_t SIZE_0>
     constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
         *this = Tensor(std::move(arr));
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..28127f9efe437531a64d228f7ed9c168edc39eb6
--- /dev/null
+++ b/include/aidge/operator/Fold.hpp
@@ -0,0 +1,143 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_FOLD_H_
+#define AIDGE_CORE_OPERATOR_FOLD_H_
+
+#include <array>
+#include <cmath>    // std::floor
+#include <cstddef>  // std::size_t
+#include <string>
+#include <utility>  // std::pair
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class FoldAttr { OutputDims, StrideDims, DilationDims, KernelDims };
+
+template <DimIdx_t DIM>
+class Fold_Op : public OperatorTensor,
+                public Registrable<Fold_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Fold_Op<DIM> &)> {
+
+public:
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<FoldAttr,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>>;
+    template <FoldAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Fold_Op() = delete;
+
+    constexpr Fold_Op(const std::array<DimSize_t, DIM> &outputDims,
+                      const std::array<DimSize_t, DIM> &kernelDims,
+                      const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<FoldAttr::OutputDims>(outputDims),
+            attr<FoldAttr::StrideDims>(strideDims),
+            attr<FoldAttr::DilationDims>(dilationDims),
+            attr<FoldAttr::KernelDims>(kernelDims))) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
+     * input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Fold_Op(const Fold_Op<DIM> &op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
+    {
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Fold_Op<DIM>, *this, op.backend());
+        }
+        else {
+            mImpl = nullptr;
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Fold_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Fold_Op<DIM>>(*this);
+    }
+
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& outputDims() const { return mAttributes->template getAttr<FoldAttr::OutputDims>(); }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<FoldAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<FoldAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<FoldAttr::KernelDims>(); }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> Fold(const std::array<DimSize_t, DIM> &outputDims,
+                                  const std::array<DimSize_t, DIM> &kernelDims,
+                                  const std::string& name = "",
+                                  const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+    // FIXME: properly handle default w&b initialization in every case
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Fold, not supported");
+    return std::make_shared<Node>(std::make_shared<Fold_Op<static_cast<DimIdx_t>(DIM)>>(outputDims, kernelDims, strideDims, dilationDims), name);
+}
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> Fold(
+    DimSize_t const (&outputDims)[DIM],
+    DimSize_t const (&kernelDims)[DIM],
+    const std::string& name = "",
+    const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Fold, not supported");
+    return Fold(to_array(outputDims), to_array(kernelDims), name, strideDims, dilationDims);
+}
+}  // namespace Aidge
+
+extern template class Aidge::Fold_Op<2>;
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::FoldAttr>::data[] = {
+    "OutputDims",
+    "StrideDims",
+    "DilationDims",
+    "KernelDims"
+};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_FOLD_H_ */
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..169fbb05ebeff0e5d38eb9606133d6279cc31cd8
--- /dev/null
+++ b/include/aidge/operator/Unfold.hpp
@@ -0,0 +1,146 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_UNFOLD_H_
+#define AIDGE_CORE_OPERATOR_UNFOLD_H_
+
+#include <array>
+#include <cmath>    // std::floor
+#include <cstddef>  // std::size_t
+#include <string>
+#include <utility>  // std::pair
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+template <DimIdx_t DIM>
+class Unfold_OpImpl : public OperatorImpl {
+public:
+    Unfold_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
+enum class UnfoldAttr { StrideDims, DilationDims, KernelDims };
+
+template <DimIdx_t DIM>
+class Unfold_Op : public OperatorTensor,
+                public Registrable<Unfold_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM> &)> {
+
+public:
+    static const std::string Type;
+
+private:
+    using Attributes_ = StaticAttributes<UnfoldAttr,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>,
+                                        std::array<DimSize_t, DIM>>;
+    template <UnfoldAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+    Unfold_Op() = delete;
+
+    constexpr Unfold_Op(const std::array<DimSize_t, DIM> &kernelDims,
+                      const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                      const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1))
+        : OperatorTensor(Type, {InputCategory::Data}, 1),
+          mAttributes(std::make_shared<Attributes_>(
+            attr<UnfoldAttr::StrideDims>(strideDims),
+            attr<UnfoldAttr::DilationDims>(dilationDims),
+            attr<UnfoldAttr::KernelDims>(kernelDims)))
+    {
+        mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
+    }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its
+     * input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Unfold_Op(const Unfold_Op<DIM> &op)
+        : OperatorTensor(op),
+          mAttributes(op.mAttributes)
+    {
+        if (!op.backend().empty()) {
+            SET_IMPL_MACRO(Unfold_Op<DIM>, *this, op.backend());
+        }
+        else {
+            mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
+        }
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Unfold_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Unfold_Op<DIM>>(*this);
+    }
+
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<UnfoldAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<UnfoldAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<UnfoldAttr::KernelDims>(); }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> Unfold(const std::array<DimSize_t, DIM> &kernelDims,
+                                  const std::string& name = "",
+                                  const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+                                  const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+    // FIXME: properly handle default w&b initialization in every case
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not supported");
+    return std::make_shared<Node>(std::make_shared<Unfold_Op<static_cast<DimIdx_t>(DIM)>>(kernelDims, strideDims, dilationDims), name);
+}
+
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> Unfold(
+    DimSize_t const (&kernelDims)[DIM],
+    const std::string& name = "",
+    const std::array<DimSize_t, DIM> &strideDims = create_array<DimSize_t,DIM>(1),
+    const std::array<DimSize_t, DIM> &dilationDims = create_array<DimSize_t,DIM>(1)) {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not supported");
+    return Unfold(to_array(kernelDims), name, strideDims, dilationDims);
+}
+}  // namespace Aidge
+
+extern template class Aidge::Unfold_Op<2>;
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::UnfoldAttr>::data[] = {
+    "StrideDims",
+    "DilationDims",
+    "KernelDims"
+};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_UNFOLD_H_ */
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index 48137610fe74fc8839c2e5dcf6db1df10e29d420..e33abcaebc02e8bcdd002efb7c2d8fe45d883906 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -144,6 +144,13 @@ void expandMetaOps(std::shared_ptr<GraphView> graph, bool recursive = false);
 */
 size_t fuseToMetaOps(std::shared_ptr<GraphView> graph, const std::string& query, const std::string& type = "");
 
+/**
+ * Replace Conv layers with an equivalent Unfold + MatMul + Reshape subgraph.
+ * @param graph Graph to manipulate
+ * @return size_t Number of replacement
+*/
+size_t convToMatMul(std::shared_ptr<GraphView> graph);
+
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_UTILS_RECIPES_H_ */
diff --git a/include/aidge/utils/ArrayHelpers.hpp b/include/aidge/utils/ArrayHelpers.hpp
index b0db3ca11c10c10a3ce63c3c4809cf7ae09173da..6648c654d28197dc018b94e8fa300366af52db4a 100644
--- a/include/aidge/utils/ArrayHelpers.hpp
+++ b/include/aidge/utils/ArrayHelpers.hpp
@@ -101,6 +101,13 @@ constexpr std::array<T, N + 1> append(T t, std::array<T, N> a) {
 }
 
 // Generic helper for initializing a Tensor
+template <typename T>
+struct Vector {
+    Vector(const std::vector<T>& data_) : data(data_) {}
+    template <typename U> Vector(const std::vector<U>& data_) : data(data_.begin(), data_.end()) {}
+    std::vector<T> data;
+};
+
 template <typename T, std::size_t SIZE_0>
 struct Array1D {
     T data[SIZE_0];
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 5a11aa20e03bef274f784788dee1ef047cafba42..a7959419ab22fae8443ee9cd3ca286874fa65725 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -287,13 +287,13 @@ void Aidge::GraphView::setOrderedInputs(const std::vector<std::pair<NodePtr, IOI
     // it into account.
     if (input.first != nullptr) {
       auto it = std::find(ignoredInputs.begin(), ignoredInputs.end(), input);
-      AIDGE_ASSERT(it != ignoredInputs.end(), "unknown or duplicate input");
+      AIDGE_ASSERT(it != ignoredInputs.end(), "unknown or duplicate input: {} (of type {})", input.first->name(), input.first->type());
       ignoredInputs.erase(it);
       ++nbInputs;
     }
   }
 
-  AIDGE_ASSERT(nbInputs <= mInputNodes.size(), "too many specified number of inputs");
+  AIDGE_ASSERT(nbInputs <= mInputNodes.size(), "too many specified number of inputs: {} specified vs {} available", nbInputs, mInputNodes.size());
 
   mInputNodes = inputs;
   mInputNodes.insert(mInputNodes.end(), ignoredInputs.begin(), ignoredInputs.end());
@@ -308,13 +308,13 @@ void Aidge::GraphView::setOrderedOutputs(const std::vector<std::pair<NodePtr, IO
     // it into account.
     if (output.first != nullptr) {
       auto it = std::find(ignoredOutputs.begin(), ignoredOutputs.end(), output);
-      AIDGE_ASSERT(it != ignoredOutputs.end(), "unknown or duplicate output");
+      AIDGE_ASSERT(it != ignoredOutputs.end(), "unknown or duplicate output: {} (of type {})", output.first->name(), output.first->type());
       ignoredOutputs.erase(it);
       ++nbOutputs;
     }
   }
 
-  AIDGE_ASSERT(nbOutputs <= mOutputNodes.size(), "too many specified number of outputs");
+  AIDGE_ASSERT(nbOutputs <= mOutputNodes.size(), "too many specified number of outputs: {} specified vs {} available", nbOutputs, mOutputNodes.size());
 
   mOutputNodes = outputs;
   mOutputNodes.insert(mOutputNodes.end(), ignoredOutputs.begin(), ignoredOutputs.end());
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..abe73e54ede0611cb14e24332302c35afa91c2a9
--- /dev/null
+++ b/src/operator/Fold.cpp
@@ -0,0 +1,67 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Fold.hpp"
+
+#include <cmath>      // std::floor
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <utility>    // std::pair
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::Fold_Op<DIM>::Type = "Fold";
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::Fold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        auto dims(getInput(0)->dims());
+        DimSize_t k = 1;
+        DimSize_t l = 1;
+
+        for (std::size_t dim = 0; dim < this->kernelDims().size() ; ++dim) {
+            const DimSize_t kernelExtent = this->dilationDims()[dim] *
+                                                    (this->kernelDims()[dim] - 1) + 1;
+
+            k *= this->kernelDims()[dim];
+            l *= 1 + static_cast<DimSize_t>(
+                    std::floor(static_cast<float>(this->outputDims()[dim] - kernelExtent) /
+                            static_cast<float>(this->strideDims()[dim])));
+        }
+
+        AIDGE_ASSERT(dims[dims.size() - 2] % k == 0 , "Fold: input number of channels ({}) is not divisible by the product of provided kernel dims ({})!",
+            dims[dims.size() - 2], k);
+        AIDGE_ASSERT(dims[dims.size() - 1] == l, "Fold: mismatch between expected input 3rd dim {} and provided input 3rd dim {}",
+            dims[dims.size() - 1], l);
+
+        dims[dims.size() - 2] /= k;
+        dims.pop_back();
+        dims.insert(dims.end(), this->outputDims().begin(), this->outputDims().end());
+        mOutputs[0]->resize(dims);
+        return true;
+    }
+
+    return false;
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::Fold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(Fold_Op<DIM>, *this, name);
+    mOutputs[0]->setBackend(name, device);
+}
+
+template class Aidge::Fold_Op<2>;
\ No newline at end of file
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 17b4960dfdfc9de199cc25b0119a5cb000bcf48c..5abfff9d8202003cbe5a76a94fab9d9ab5176b6e 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -53,7 +53,7 @@ bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
                 dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
             }
 
-            AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrices sizes.");
+            AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrices sizes: {} vs {}.", dims0, dims1);
 
             std::vector<std::size_t> outDims = std::vector<std::size_t>(dims_size-2, 1);
             for (std::size_t i = 0; i < dims_size-2; ++i) {
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 1838c008a6b83548b6a5a80af0363e2cf239b649..4184fc18abbc5490a1d6fbf7363fef817c7ecbc9 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -73,14 +73,12 @@ bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
         {
             int64_t dimSize = this->shape()[i];
             if (dimSize < 0) {
-                if (foundNegativeDimension) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Found more than one negative dimension in Reshape Operator.");
-                }
+                AIDGE_ASSERT(!foundNegativeDimension, "Found more than one negative dimension in Reshape Operator: {}.", this->shape());
                 foundNegativeDimension = true;
                 dimSize = 1;
                 negativeIndex = static_cast<DimIdx_t>(i);
             }
-            else if (dimSize == 0 && !mAttributes->template getAttr<ReshapeAttr::AllowZero>())
+            else if (dimSize == 0 && !this->allowZero())
             {
                 dimSize = getInput(0) -> dims()[i];
             }
diff --git a/src/operator/Unfold.cpp b/src/operator/Unfold.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..94c970fd3a246f0d9e1237e7cce0c15dd8e24526
--- /dev/null
+++ b/src/operator/Unfold.cpp
@@ -0,0 +1,107 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/Unfold.hpp"
+
+#include <cmath>      // std::floor
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
+#include <utility>    // std::pair
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::Unfold_OpImpl<DIM>::forward() {
+    const Unfold_Op<DIM>& op = dynamic_cast<const Unfold_Op<DIM>&>(mOp);
+    const auto kernelDims = op.kernelDims();
+    const auto dilationDims = op.dilationDims();
+    const auto strideDims = op.strideDims();
+    const DimSize_t inHeight = op.getInput(0)->dims()[2];
+    const DimSize_t inWidth = op.getInput(0)->dims()[3];
+    const DimSize_t inChannels = op.getInput(0)->dims()[1];
+
+    const DimSize_t kernelExtentHeight = op.dilationDims()[0] *
+                                            (op.kernelDims()[0] - 1) + 1;
+    const DimSize_t outHeight = 1 + static_cast<DimSize_t>(
+                    std::floor(static_cast<float>(inHeight - kernelExtentHeight) /
+                            static_cast<float>(op.strideDims()[0])));
+    const DimSize_t kernelExtentWidth = op.dilationDims()[1] *
+                                            (op.kernelDims()[1] - 1) + 1;
+    const DimSize_t outWidth = 1 + static_cast<DimSize_t>(
+                    std::floor(static_cast<float>(inWidth - kernelExtentWidth) /
+                            static_cast<float>(op.strideDims()[1])));
+    const DimSize_t outChannels = op.getOutput(0)->dims()[1];
+
+    for (DimSize_t n = 0; n < op.getOutput(0)->dims()[0]; ++n) {
+        for (DimSize_t outC = 0; outC < outChannels; ++outC) {
+            const auto inOffsetW = outC % kernelDims[1];
+            const auto inOffsetH = (outC / kernelDims[1]) % kernelDims[0];
+            const auto inC = outC / kernelDims[0] / kernelDims[1];
+
+            for (DimSize_t outH = 0; outH < outHeight; ++outH) {
+                const auto inH = outH * strideDims[0] + inOffsetH * dilationDims[0];
+
+                for (DimSize_t outW = 0; outW < outWidth; ++outW) {
+                    const auto inW = outW * strideDims[1] + inOffsetW * dilationDims[1];
+
+                    op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(((n * inChannels + inC) * inHeight + inH) * inWidth + inW), 1,
+                        ((n * outChannels + outC) * outHeight + outH) * outWidth + outW);
+                }
+            }
+        }
+    }
+}
+
+template <Aidge::DimIdx_t DIM>
+const std::string Aidge::Unfold_Op<DIM>::Type = "Unfold";
+
+template <Aidge::DimIdx_t DIM>
+bool Aidge::Unfold_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        const std::array<DimSize_t, DIM + 2> inputDims(getInput(0)->template dims<DIM+2>());
+        DimSize_t k = 1;
+        DimSize_t l = 1;
+
+        for (std::size_t dim = 0; dim < this->kernelDims().size() ; ++dim) {
+            const DimSize_t kernelExtent = this->dilationDims()[dim] *
+                                                    (this->kernelDims()[dim] - 1) + 1;
+
+            k *= this->kernelDims()[dim];
+            l *= 1 + static_cast<DimSize_t>(
+                    std::floor(static_cast<float>(inputDims[dim+2] - kernelExtent) /
+                            static_cast<float>(this->strideDims()[dim])));
+        }
+
+        mOutputs[0]->resize({inputDims[0], inputDims[1] * k, l});
+        return true;
+    }
+
+    return false;
+}
+
+template <Aidge::DimIdx_t DIM>
+void Aidge::Unfold_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    if (Registrar<Unfold_Op<DIM>>::exists({name})){
+        SET_IMPL_MACRO(Unfold_Op<DIM>, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<Unfold_OpImpl<DIM>>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
+
+template class Aidge::Unfold_OpImpl<2>;
+template class Aidge::Unfold_Op<2>;
\ No newline at end of file
diff --git a/src/recipes/ConvToMatMul.cpp b/src/recipes/ConvToMatMul.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9b88ffc73204b44cf857213d1fdfff49b3191f73
--- /dev/null
+++ b/src/recipes/ConvToMatMul.cpp
@@ -0,0 +1,114 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Matching.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/Unfold.hpp"
+#include "aidge/operator/Fold.hpp"
+#include "aidge/operator/Reshape.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+size_t Aidge::convToMatMul(std::shared_ptr<GraphView> graphView) {
+    const auto matches = SinglePassGraphMatching(graphView).match("Conv");
+
+    size_t nbReplaced = 0;
+    for (const auto& match : matches) {
+        const auto convNode = match.startNode;
+        const std::shared_ptr<Conv_Op<2>> convOp =
+            std::static_pointer_cast<Conv_Op<2>>(convNode->getOperator());
+
+        AIDGE_ASSERT(convOp->getOutput(0) && !convOp->getOutput(0)->empty(),
+            "Output dims must have been forwarded in order to apply convToMatMul for Conv {}", convNode->name());
+
+        //const auto nbDims = convOp->getOutput(0)->dims().size();
+        //const std::array<DimSize_t, 2> outputDims = {convOp->getOutput(0)->dims()[nbDims - 2], convOp->getOutput(0)->dims()[nbDims - 1]};
+        const auto wShape = convOp->getInput(1)->dims();
+        const auto wFlattenSize = std::accumulate(wShape.cbegin() + 1, wShape.cend(), DimSize_t(1), std::multiplies<DimSize_t>());
+
+        auto microGraph = std::make_shared<GraphView>();
+        auto unfold = Unfold(convOp->kernelDims(),
+            (!convNode->name().empty()) ? convNode->name() + "_unfold" : "",
+            convOp->strideDims(),
+            convOp->dilationDims());
+        auto wReshapeProd = Producer(std::make_shared<Tensor>(Vector<int64_t>{{static_cast<int64_t>(convOp->getInput(1)->dims()[0]), static_cast<int64_t>(wFlattenSize)}}),
+            (!convNode->name().empty()) ? convNode->name() + "_w_reshape_shape_prod" : "",
+            true);
+        auto wReshape = Reshape({},
+            false,
+            (!convNode->name().empty()) ? convNode->name() + "_w_reshape" : "");
+        auto matMul = MatMul((!convNode->name().empty()) ? convNode->name() + "_matmul" : "");
+        auto reshapeProd = Producer(std::make_shared<Tensor>(Vector<int64_t>(convOp->getOutput(0)->dims())),
+            (!convNode->name().empty()) ? convNode->name() + "_reshape_shape_prod" : "",
+            true);
+        auto reshape = Reshape({},
+            false,
+            (!convNode->name().empty()) ? convNode->name() + "_reshape" : "");
+        //auto fold = Fold(outputDims,
+        //    convOp->kernelDims(),
+        //    (!convNode->name().empty()) ? convNode->name() + "_unfold" : "",
+        //    convOp->strideDims(),
+        //    convOp->dilationDims());
+
+        wReshapeProd->addChild(wReshape, 0, 1);
+        wReshape->addChild(matMul, 0, 0);
+        unfold->addChild(matMul, 0, 1);
+        reshapeProd->addChild(reshape, 0, 1);
+        matMul->addChild(reshape, 0, 0);
+        //matMul->addChild(fold, 0, 0);
+        microGraph->add({unfold, wReshapeProd, wReshape, matMul, reshapeProd, reshape}, false);
+        //microGraph->add({unfold, wReshapeProd, wReshape, matMul, fold}, false);
+
+        // Handle bias
+        if (convOp->getInput(2) && !convOp->getInput(2)->empty()) {
+            auto add = Add(2, (!convNode->name().empty()) ? convNode->name() + "_add" : "");
+            auto bReshapeProd = Producer(std::make_shared<Tensor>(Vector<int64_t>{{1, static_cast<int64_t>(convOp->getInput(2)->size()), 1, 1}}),
+                (!convNode->name().empty()) ? convNode->name() + "_b_reshape_shape_prod" : "",
+                true);
+            auto bReshape = Reshape({},
+                false,
+                (!convNode->name().empty()) ? convNode->name() + "_b_reshape" : "");
+
+            bReshapeProd->addChild(bReshape, 0, 1);
+            bReshape->addChild(add, 0, 1);
+            reshape->addChild(add, 0, 0);
+            //fold->addChild(add, 0, 0);
+            microGraph->add({reshape, add, bReshapeProd, bReshape}, false);
+            //microGraph->add({fold, add}, false);
+            microGraph->setOrderedInputs({{unfold, 0}, {wReshape, 0}, {bReshape, 0}});
+        }
+        else {
+            // Add a dummy 3rd input in order for replace() to work
+            microGraph->setOrderedInputs({{unfold, 0}, {wReshape, 0}, {nullptr, 0}});
+        }
+
+        auto gConv = std::make_shared<GraphView>();
+        gConv->add(convNode, false);
+
+        const auto success = GraphView::replace(gConv, microGraph);
+
+        if (!success) {
+            Log::notice("Could not replace Conv {} with MatMul", convNode->name());
+        }
+        else {
+            ++nbReplaced;
+        }
+    }
+
+    Log::info("Replaced {} (out of {}) matching Conv with MatMul", nbReplaced, matches.size());
+    return nbReplaced;
+}
diff --git a/unit_tests/recipes/Test_ConvToMatMul.cpp b/unit_tests/recipes/Test_ConvToMatMul.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b5ecf7c72804413b620546666c11bc14ad809fbe
--- /dev/null
+++ b/unit_tests/recipes/Test_ConvToMatMul.cpp
@@ -0,0 +1,39 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/recipes/Recipes.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include <cstddef>
+
+using namespace Aidge;
+
+TEST_CASE("[ConvToMatMul] conv") {
+    auto conv1 = Conv(3, 32, {3, 3}, "conv1");
+    auto conv2 = Conv(32, 64, {3, 3}, "conv2", {1, 1}, {1, 1}, true);
+    auto conv3 = Conv(64, 10, {1, 1}, "conv3", {2, 2});
+
+    auto g1 = Sequential({
+        Producer({16, 3, 224, 224}, "dataProvider"),
+        conv1,
+        conv2,
+        conv3
+    });
+
+    g1->forwardDims();
+
+    g1->save("convToMatMul_before");
+    REQUIRE(convToMatMul(g1) == 3);
+    g1->save("convToMatMul_after");
+}