diff --git a/aidge_core/unit_tests/test_recipes.py b/aidge_core/unit_tests/test_recipes.py
index 8a0a470221e118fd450be7a7bf1bf6ede2df6178..c8dd4c727fbaf8224e8d04111a5054caeb5e5c99 100644
--- a/aidge_core/unit_tests/test_recipes.py
+++ b/aidge_core/unit_tests/test_recipes.py
@@ -65,7 +65,7 @@ class test_recipes(unittest.TestCase):
         graph_view.add(b1)
 
         old_nodes = graph_view.get_nodes()
-        aidge_core.fuse_mul_add(graph_view)
+        aidge_core.matmul_to_fc(graph_view)
 
         self.assertTrue(len(graph_view.get_nodes()) == len(old_nodes) - 2)
         self.assertTrue("MatMul0" not in [i.name() for i in graph_view.get_nodes()])
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 3ee64cecaf9b139ec0a7ef9a0fa4acc5b06f57c7..89d7a3a7b0c4d164473869a9d6372c3bf48cd308 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -449,12 +449,16 @@ public:
      */
     constexpr inline const std::vector<DimSize_t>& dims() const noexcept { return mDims; }
 
+    inline DimSize_t dim(DimIdx_t idx) const { return mDims[idx]; }
+
     /**
      * @brief Get strides of the Tensor object.
      * @return constexpr const std::vector<DimSize_t>&
      */
     constexpr inline const std::vector<DimSize_t>& strides() const noexcept { return mStrides; }
 
+    inline DimSize_t stride(DimIdx_t idx) const { return mStrides[idx]; }
+
     /**
      * @brief Return true if Tensor is contiguous in memory.
      * @return bool
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..72ff83834962c1860b135a4187e72199b04361db
--- /dev/null
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -0,0 +1,95 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
+#define AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+class DepthToSpace_OpImpl : public OperatorImpl {
+public:
+    DepthToSpace_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
+enum class DepthToSpaceAttr { BlockSize, Mode };
+
+
+class DepthToSpace_Op : public OperatorTensor,
+                public Registrable<DepthToSpace_Op,
+                    std::string,
+                    std::shared_ptr<OperatorImpl>(const DepthToSpace_Op &)> {
+public:
+    static const std::string Type;
+    enum class Mode { DCR, CRD };
+
+private:
+    using Attributes_ = StaticAttributes<DepthToSpaceAttr, std::uint32_t, Mode>;
+    template <DepthToSpaceAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    DepthToSpace_Op() = delete;
+
+    DepthToSpace_Op(const std::uint32_t blockSize, const Mode mode = Mode::CRD);
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    DepthToSpace_Op(const DepthToSpace_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::DepthToSpace_Op
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::uint32_t& blockSize() const { return mAttributes->template getAttr<DepthToSpaceAttr::BlockSize>(); }
+    inline Mode& mode() const { return mAttributes->template getAttr<DepthToSpaceAttr::Mode>(); }
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> DepthToSpace(const std::uint32_t blockSize,
+                                    const DepthToSpace_Op::Mode mode = DepthToSpace_Op::Mode::CRD,
+                                    const std::string& name = "");
+
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = { "block_size", "mode" };
+}
+
+#endif //AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index af44a5df5de6908d58951b93921d49ec8e7df708..81900824ed0d26572e593982fa21ed900eda88ee 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -27,15 +27,14 @@ namespace Aidge {
 
 enum class GridSampleAttr { Mode, PaddingMode, AlignCorners };
 
-template <DimIdx_t DIM>
 class GridSample_Op : public OperatorTensor,
-	public Registrable<GridSample_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const GridSample_Op<DIM>&)> {
+	public Registrable<GridSample_Op, std::string, std::shared_ptr<OperatorImpl>(const GridSample_Op&)> {
 
 public:
 	static const std::string Type;
 
 	enum class Mode { Linear, Nearest, Cubic };
-	enum class PaddingMode { Zeros, Border, Reflexion };
+	enum class PaddingMode { Zeros, Border, Reflection };
 
 private:
 	using Attributes_ = StaticAttributes<GridSampleAttr, Mode, PaddingMode, bool>;
@@ -49,7 +48,7 @@ public:
 			PaddingMode paddingMode = PaddingMode::Zeros,
 			bool alignCorners = false);
 
-	GridSample_Op(const GridSample_Op<DIM>& other);
+	GridSample_Op(const GridSample_Op& other);
 	~GridSample_Op() noexcept;
 
 public:
@@ -63,7 +62,7 @@ public:
 	inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 	inline Mode mode() const { return mAttributes->template getAttr<GridSampleAttr::Mode>(); }
 	inline PaddingMode paddingMode() const { return mAttributes->template getAttr<GridSampleAttr::PaddingMode>(); }
-	inline bool alignBorders() const { return mAttributes->template getAttr<GridSampleAttr::AlignCorners>(); }
+	inline bool alignCorners() const { return mAttributes->template getAttr<GridSampleAttr::AlignCorners>(); }
 
 	static const std::vector<std::string> getInputsName() {
 		return {"data_input", "grid_field"};
@@ -73,13 +72,9 @@ public:
 	}
 };
 
-extern template class GridSample_Op<1>;
-extern template class GridSample_Op<2>;
-
-template <DimIdx_t DIM>
 std::shared_ptr<Node> GridSample(
-                        typename GridSample_Op<DIM>::Mode mode = GridSample_Op<DIM>::Mode::Linear,
-                        typename GridSample_Op<DIM>::PaddingMode paddingMode = GridSample_Op<DIM>::PaddingMode::Zeros,
+                        typename GridSample_Op::Mode mode = GridSample_Op::Mode::Linear,
+                        typename GridSample_Op::PaddingMode paddingMode = GridSample_Op::PaddingMode::Zeros,
                         bool alignCorners = false,
                         const std::string& name = "");
 
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index 205c9f966b7d7cf984dd591daf110d1304216ec0..c42b285dacb6c59c5fa30388c268f1680152a5e0 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -31,18 +31,14 @@ void constantFolding(std::shared_ptr<GraphView> graph);
  *
  * @param nodes Strict set of Node to merge.
  */
-//void fuseMulAdd(std::set<std::shared_ptr<Node>> nodes);
-
-void fuseMulAdd(std::shared_ptr<MatchSolution> solution);
-
-void fuseMulAdd(std::shared_ptr<Node> matmul,std::shared_ptr<Node> add);
+void matMulToFC(std::shared_ptr<Node> matmul, std::shared_ptr<Node> add = nullptr);
 
 /**
  * @brief Merge ``MatMul`` and :cpp:function:`Aidge::Add` Node into a :cpp:function:`Aidge::FC` Node.
  *
  * @param graphView Graph view to use graph matching on, in order to apply transformations.
  */
-void fuseMulAdd(std::shared_ptr<GraphView> graphView);
+void matMulToFC(std::shared_ptr<GraphView> graphView);
 
 /**
  * @brief Remove a node type.
diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp
index 34d1ff295093dd35fe5e71cdf335e6147d08194e..49e74f4cbab90f141af5e76df7fbdef6e3794146 100644
--- a/python_binding/operator/pybind_GridSample.cpp
+++ b/python_binding/operator/pybind_GridSample.cpp
@@ -22,58 +22,51 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/Registrar.hpp" // declare_registrable
 
-template <std::size_t DIM>
-static typename Aidge::GridSample_Op<DIM>::Mode stringToInterpolationMode(const std::string& mode) {
-    static std::unordered_map<std::string, typename Aidge::GridSample_Op<DIM>::Mode> map = {
-        {"linear", Aidge::GridSample_Op<DIM>::Mode::Linear},
-        {"nearest", Aidge::GridSample_Op<DIM>::Mode::Nearest},
-        {"cubic", Aidge::GridSample_Op<DIM>::Mode::Cubic}
+
+static typename Aidge::GridSample_Op::Mode stringToInterpolationMode(const std::string& mode) {
+    static std::unordered_map<std::string, typename Aidge::GridSample_Op::Mode> map = {
+        {"linear", Aidge::GridSample_Op::Mode::Linear},
+        {"nearest", Aidge::GridSample_Op::Mode::Nearest},
+        {"cubic", Aidge::GridSample_Op::Mode::Cubic}
     };
     return map[mode];
 }
 
-template Aidge::GridSample_Op<1>::Mode stringToInterpolationMode<1>(const std::string&);
-template Aidge::GridSample_Op<2>::Mode stringToInterpolationMode<2>(const std::string&);
-
-template <std::size_t DIM>
-static typename Aidge::GridSample_Op<DIM>::PaddingMode stringToPaddingMode(const std::string& mode) {
-    static std::unordered_map<std::string, typename Aidge::GridSample_Op<DIM>::PaddingMode> map = {
-        {"zeros", Aidge::GridSample_Op<DIM>::PaddingMode::Zeros},
-        {"border", Aidge::GridSample_Op<DIM>::PaddingMode::Border},
-        {"reflexion", Aidge::GridSample_Op<DIM>::PaddingMode::Reflexion}
+static typename Aidge::GridSample_Op::PaddingMode stringToPaddingMode(const std::string& mode) {
+    static std::unordered_map<std::string, typename Aidge::GridSample_Op::PaddingMode> map = {
+        {"zeros", Aidge::GridSample_Op::PaddingMode::Zeros},
+        {"border", Aidge::GridSample_Op::PaddingMode::Border},
+        {"reflection", Aidge::GridSample_Op::PaddingMode::Reflection}
     };
     return map[mode];
 }
 
-template Aidge::GridSample_Op<1>::PaddingMode stringToPaddingMode<1>(const std::string&);
-template Aidge::GridSample_Op<2>::PaddingMode stringToPaddingMode<2>(const std::string&);
-
 namespace py = pybind11;
 namespace Aidge {
 
-template <DimIdx_t DIM> void declare_GridSampleOp(py::module &m) {
-  const std::string pyClassName("GridSampleOp" + std::to_string(DIM) + "D");
-  py::class_<GridSample_Op<DIM>, std::shared_ptr<GridSample_Op<DIM>>, OperatorTensor>(
+void declare_GridSampleOp(py::module &m) {
+  const std::string pyClassName("GridSampleOp");
+  py::class_<GridSample_Op, std::shared_ptr<GridSample_Op>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
         .def(py::init([](const std::string& mode,
                          const std::string& padding_mode,
                          bool align_corners) {
-            return new GridSample_Op<DIM>(stringToInterpolationMode<DIM>(mode), stringToPaddingMode<DIM>(padding_mode), align_corners);
+            return new GridSample_Op(stringToInterpolationMode(mode), stringToPaddingMode(padding_mode), align_corners);
         }), py::arg("mode") = "linear",
             py::arg("padding_mode") = "zeros",
             py::arg("alogn_corners") = false)
-        .def_static("get_inputs_name", &GridSample_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &GridSample_Op<DIM>::getOutputsName)
+        .def_static("get_inputs_name", &GridSample_Op::getInputsName)
+        .def_static("get_outputs_name", &GridSample_Op::getOutputsName)
         ;
 
-  declare_registrable<GridSample_Op<DIM>>(m, pyClassName);
+  declare_registrable<GridSample_Op>(m, pyClassName);
 
-  m.def(("GridSample" + std::to_string(DIM) + "D").c_str(), [](const std::string& mode,
-                                                        const std::string& padding_mode,
-                                                        bool align_corners,
-                                                        const std::string& name) {
-        return GridSample<DIM>(stringToInterpolationMode<DIM>(mode), stringToPaddingMode<DIM>(padding_mode), align_corners, name);
+  m.def("GridSample", [](const std::string& mode,
+                        const std::string& padding_mode,
+                        bool align_corners,
+                        const std::string& name) {
+        return GridSample(stringToInterpolationMode(mode), stringToPaddingMode(padding_mode), align_corners, name);
     }, py::arg("mode"),
        py::arg("padding_mode"),
        py::arg("align_corners"),
@@ -82,9 +75,7 @@ template <DimIdx_t DIM> void declare_GridSampleOp(py::module &m) {
 
 
 void init_GridSample(py::module &m) {
-  declare_GridSampleOp<1>(m);
-  declare_GridSampleOp<2>(m);
-//   declare_GridSampleOp<3>(m);
+  declare_GridSampleOp(m);
 }
 
 } // namespace Aidge
diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp
index c0392287a756b6272a59275b6d12b3a70c1c9420..1c04a320d85a833cc3c0b666390edc7a8648214b 100644
--- a/python_binding/recipes/pybind_Recipes.cpp
+++ b/python_binding/recipes/pybind_Recipes.cpp
@@ -25,14 +25,14 @@ void init_Recipes(py::module &m)
 {
 
 
-  m.def("fuse_mul_add", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseMulAdd), py::arg("graph_view"), R"mydelimiter(
+  m.def("matmul_to_fc", static_cast<void(*)(std::shared_ptr<GraphView>)>(matMulToFC), py::arg("graph_view"), R"mydelimiter(
     Recipe to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
     :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
     )mydelimiter");
 
-  // m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
+  // m.def("matmul_to_fc", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(matMulToFC), py::arg("nodes"), R"mydelimiter(
   //   recipe to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
 
   //   :param nodes: The MatMul and Add nodes to fuse.
@@ -84,13 +84,6 @@ void init_Recipes(py::module &m)
   //   :type nodes: list of :py:class:`aidge_core.Node`
   //   )mydelimiter");
 
-  // m.def("fuse_mul_add", static_cast<void(*)(std::set<std::shared_ptr<Node>>)>(fuseMulAdd), py::arg("nodes"), R"mydelimiter(
-  //   Recipe to Fuse MatMul and Add operators into an :py:class:`aidge_core.FC` operator.
-
-  //   :param nodes: The MatMul and Add nodes to fuse.
-  //   :type nodes: list of :py:class:`aidge_core.Node`
-  //   )mydelimiter");
-
   m.def("fuse_batchnorm", static_cast<void(*)(std::shared_ptr<GraphView>)>(fuseBatchNorm), py::arg("graph_view"), R"mydelimiter(
     Recipe to remove a flatten operator.
 
diff --git a/src/operator/DepthToSpace.cpp b/src/operator/DepthToSpace.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0c858548ec484c34a651efa4adec1cde7ccb9e54
--- /dev/null
+++ b/src/operator/DepthToSpace.cpp
@@ -0,0 +1,122 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/DepthToSpace.hpp"
+
+#include <array>
+#include <cstddef>  // std::size_t
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+void Aidge::DepthToSpace_OpImpl::forward() {
+    const DepthToSpace_Op& op = dynamic_cast<const DepthToSpace_Op&>(mOp);
+    // Assumes an NCHW Tensor format
+
+    // Get input dimensions
+    const auto& dims = op.getInput(0)->dims<4>();
+    // get final output dimension
+    const std::array<DimSize_t, 4> final_dims = op.getOutput(0)->dims<4>();
+
+    std::size_t b = dims[0];
+    std::size_t c = dims[1] / (static_cast<DimSize_t>(op.blockSize()) * static_cast<DimSize_t>(op.blockSize()));
+    std::size_t h = dims[2];
+    std::size_t w = dims[3];
+
+    // Copy input tensor to output
+    op.setOutput(0, op.getInput(0));
+
+    // Step 1: Resize
+    const std::vector<DimSize_t> resize_dims =
+        (op.mode() == DepthToSpace_Op::Mode::CRD) ?
+            std::vector<DimSize_t>({b, c, static_cast<DimSize_t>(op.blockSize()), static_cast<DimSize_t>(op.blockSize()), h, w}) :
+            std::vector<DimSize_t>({b, static_cast<DimSize_t>(op.blockSize()), static_cast<DimSize_t>(op.blockSize()), c, h, w});
+    op.getOutput(0)->resize(resize_dims);
+
+    // Step 2: Transpose
+    const std::vector<DimSize_t> transpose_order =
+        (op.mode() == DepthToSpace_Op::Mode::CRD) ?
+            std::vector<DimSize_t>({0, 1, 4, 2, 5, 3}) :
+            std::vector<DimSize_t>({0, 3, 4, 1, 5, 2});
+    op.getOutput(0)->copyTranspose(*(op.getOutput(0)), transpose_order);
+
+    // Step 3: Final resize
+    op.getOutput(0)->resize(final_dims);
+}
+
+//////////////////////////////////////////////////////
+
+const std::string Aidge::DepthToSpace_Op::Type = "DepthToSpace";
+
+Aidge::DepthToSpace_Op::DepthToSpace_Op(const std::uint32_t blockSize, const Aidge::DepthToSpace_Op::Mode mode)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<DepthToSpaceAttr::BlockSize>(blockSize),
+        attr<DepthToSpaceAttr::Mode>(mode)))
+{
+    // ctor
+}
+
+Aidge::DepthToSpace_Op::DepthToSpace_Op(const Aidge::DepthToSpace_Op& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(DepthToSpace_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::DepthToSpace_Op::clone() const {
+    return std::make_shared<DepthToSpace_Op>(*this);
+}
+
+bool Aidge::DepthToSpace_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        AIDGE_ASSERT(getInput(0)->nbDims() == 4, "{} Operator only accepts 4-D input Tensors.", DepthToSpace_Op::Type);
+        AIDGE_ASSERT(getInput(0)->dims()[1] % (blockSize() * blockSize()) == 0, "Number of channels must be divisible by blocksize squared");
+
+        // Compute output dims
+        const std::array<DimSize_t, 4>& inDims = getInput(0)->dims<4>();
+        const std::vector<DimSize_t> outDims =
+                {inDims[0],
+                 inDims[1] / (static_cast<DimSize_t>(blockSize()) * static_cast<DimSize_t>(blockSize())),
+                 inDims[2] * static_cast<DimSize_t>(blockSize()),
+                 inDims[3] * static_cast<DimSize_t>(blockSize())};
+
+        mOutputs[0]->resize(outDims);
+        return true;
+    }
+
+    return false;
+}
+
+void Aidge::DepthToSpace_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<DepthToSpace_Op>::exists({name})) {
+        SET_IMPL_MACRO(DepthToSpace_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<DepthToSpace_OpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
+
+//////////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::DepthToSpace(const std::uint32_t blockSize,
+                                    const Aidge::DepthToSpace_Op::Mode mode,
+                                    const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<DepthToSpace_Op>(blockSize, mode), name);
+}
\ No newline at end of file
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
index 997348d406db6ed4393362941099c93b03d5b9e8..1a2ec88bbfb2bfed134e779619a0a3f0604ce155 100644
--- a/src/operator/Fold.cpp
+++ b/src/operator/Fold.cpp
@@ -97,4 +97,4 @@ std::shared_ptr<Aidge::Node> Aidge::Fold(const std::array<Aidge::DimSize_t, DIM>
     return std::make_shared<Node>(std::make_shared<Fold_Op<static_cast<DimIdx_t>(DIM)>>(outputDims, kernelDims, strideDims, dilationDims), name);
 }
 
-template std::shared_ptr<Aidge::Node> Aidge::Fold<2>(const std::array<Aidge::DimSize_t, 2> &outputDims, const std::array<Aidge::DimSize_t, 2> &kernelDims, const std::string& name, const std::array<Aidge::DimSize_t, 2> &strideDims, const std::array<Aidge::DimSize_t, 2> &dilationDims);
+template std::shared_ptr<Aidge::Node> Aidge::Fold<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&);
\ No newline at end of file
diff --git a/src/operator/GridSample.cpp b/src/operator/GridSample.cpp
index 6cc0ad7462886e5491ba697dbbe868a5f47e4dd4..fa1efc75a4c0a85717343ce4fcdea1a8adcfb4e7 100644
--- a/src/operator/GridSample.cpp
+++ b/src/operator/GridSample.cpp
@@ -21,13 +21,13 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::GridSample_Op<DIM>::Type = "GridSample";
 
-template <Aidge::DimIdx_t DIM>
-Aidge::GridSample_Op<DIM>::GridSample_Op(
-    typename Aidge::GridSample_Op<DIM>::Mode mode,
-    typename Aidge::GridSample_Op<DIM>::PaddingMode paddingMode,
+const std::string Aidge::GridSample_Op::Type = "GridSample";
+
+
+Aidge::GridSample_Op::GridSample_Op(
+    typename Aidge::GridSample_Op::Mode mode,
+    typename Aidge::GridSample_Op::PaddingMode paddingMode,
     bool alignCorners)
     : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 1),
       mAttributes(std::make_shared<Attributes_>(
@@ -38,46 +38,47 @@ Aidge::GridSample_Op<DIM>::GridSample_Op(
     // ctor
 }
 
-template <Aidge::DimIdx_t DIM>
-Aidge::GridSample_Op<DIM>::GridSample_Op(const Aidge::GridSample_Op<DIM>& other)
+
+Aidge::GridSample_Op::GridSample_Op(const Aidge::GridSample_Op& other)
     : OperatorTensor(other),
       mAttributes(other.mAttributes)
 {
     if (other.mImpl) {
-        SET_IMPL_MACRO(GridSample_Op<DIM>, *this, other.backend());
+        SET_IMPL_MACRO(GridSample_Op, *this, other.backend());
     } else {
         mImpl = nullptr;
     }
 }
 
-template <Aidge::DimIdx_t DIM>
-Aidge::GridSample_Op<DIM>::~GridSample_Op() noexcept = default;
 
-template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::GridSample_Op<DIM>::clone() const {
-    return std::make_shared<GridSample_Op<DIM>>(*this);
+Aidge::GridSample_Op::~GridSample_Op() noexcept = default;
+
+
+std::shared_ptr<Aidge::Operator> Aidge::GridSample_Op::clone() const {
+    return std::make_shared<GridSample_Op>(*this);
 }
 
-template <Aidge::DimIdx_t DIM>
-bool Aidge::GridSample_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+
+bool Aidge::GridSample_Op::forwardDims(bool /*allowDataDependency*/) {
     // TODO: adapt for other formats than NCHW
     if (inputsAssociated()) {
         // check data has batch and channel dimensions: (N, C, D0, D1, ..., DN)
-        AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)),
-                    "Wrong input size for {} operator.", type());
+        AIDGE_ASSERT(getInput(0)->nbDims() > 2, "Input should have at least one spatial dimension.");
+        const std::size_t nbSpatialFeat = getInput(0)->nbDims() -2; // all except channels and batches
         // check grid field
         // should be (N, D0_out, D1_out, ..., DN_out, N+1)
-        AIDGE_ASSERT(((getInput(1)->nbDims() == (DIM+2)) &&
-            (getInput(1)->template dims<DIM+2>()[DIM+1] == DIM) &&
-            (getInput(1)->template dims<DIM+2>()[0] == getInput(0)->template dims<DIM+2>()[0])),
+        AIDGE_ASSERT(((getInput(1)->nbDims() == nbSpatialFeat + 2) &&
+            (getInput(1)->dims()[nbSpatialFeat+1] == nbSpatialFeat) &&
+            (getInput(1)->dims()[0] == getInput(0)->dims()[0])),
             "Wrong grid size {} for {} operator.", getInput(1)->dims(), type());
 
-        std::array<DimSize_t, DIM + 2> outputDims{};
+        std::vector<DimSize_t> outputDims{};
+        outputDims.reserve(nbSpatialFeat+2);
         const std::vector<DimSize_t>& inputDims(getInput(1)->dims());
-        outputDims[1] = getInput(0)->template dims<DIM+2>()[1];
-        outputDims[0] = inputDims[0];
-        for (std::size_t i = 2; i < DIM+2; ++i) {
-            outputDims[i] = inputDims[i-1];
+        outputDims.push_back(inputDims[0]);
+        outputDims.push_back(getInput(0)->dims()[1]);
+        for (std::size_t i = 2; i < nbSpatialFeat+2; ++i) {
+            outputDims.push_back(inputDims[i-1]);
         }
 
         mOutputs[0]->resize(outputDims);
@@ -88,31 +89,26 @@ bool Aidge::GridSample_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
 }
 
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::GridSample_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(GridSample_Op<DIM>, *this, name);
+
+void Aidge::GridSample_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(GridSample_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
 
-template class Aidge::GridSample_Op<1>;
-template class Aidge::GridSample_Op<2>;
 
 ////////////////////////////////////////////////
 
-template <Aidge::DimIdx_t DIM>
+
 std::shared_ptr<Aidge::Node> Aidge::GridSample(
-                        typename Aidge::GridSample_Op<DIM>::Mode mode,
-                        typename Aidge::GridSample_Op<DIM>::PaddingMode paddingMode,
+                        typename Aidge::GridSample_Op::Mode mode,
+                        typename Aidge::GridSample_Op::PaddingMode paddingMode,
                         bool alignCorners,
                         const std::string& name)
 {
     return std::make_shared<Node>(
-        std::make_shared<GridSample_Op<DIM>>(
+        std::make_shared<GridSample_Op>(
                 mode,
                 paddingMode,
                 alignCorners),
             name);
 }
-
-template std::shared_ptr<Aidge::Node> Aidge::GridSample<1>(typename Aidge::GridSample_Op<1>::Mode, typename Aidge::GridSample_Op<1>::PaddingMode, bool, const std::string&);
-template std::shared_ptr<Aidge::Node> Aidge::GridSample<2>(typename Aidge::GridSample_Op<2>::Mode, typename Aidge::GridSample_Op<2>::PaddingMode, bool, const std::string&);
diff --git a/src/recipes/FuseMulAdd.cpp b/src/recipes/MatMulToFC.cpp
similarity index 60%
rename from src/recipes/FuseMulAdd.cpp
rename to src/recipes/MatMulToFC.cpp
index 6112fc47ece6bb361ebad626be7b5a6b1c2189bd..9b5addd3bb971b3f61980a582d4cce6435c57219 100644
--- a/src/recipes/FuseMulAdd.cpp
+++ b/src/recipes/MatMulToFC.cpp
@@ -22,28 +22,29 @@
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/operator/MatMul.hpp"
+#include "aidge/graph/Matching.hpp"
 
-//Graph Regex
-#include "aidge/graphRegex/GraphRegex.hpp"
 
-
-void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<Aidge::Node> addNode) { //std::set<std::shared_ptr<Node>> nodes){
+void Aidge::matMulToFC(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<Aidge::Node> addNode) {
     // Fuse Mulmat & Add into FC
     // Inputs : old nodes (pointers on mul & add)
-
-    assert((matmulNode->type() == "MatMul" && addNode->type() == "Add") && "Wrong type for the nodes to replace");
+    AIDGE_ASSERT((matmulNode->type() == "MatMul" && (addNode == nullptr || addNode->type() == "Add")),
+        "Wrong type for the nodes to replace: {} and {}",
+        matmulNode->type(), (addNode) ? addNode->type() : "nullptr");
 
 
     // Step 1 : Create FC
     // Fetch the output dimension throught the bias size
     std::shared_ptr<Node> bias = nullptr;
-    if (addNode->getParent(0) == matmulNode) {
-        AIDGE_ASSERT(addNode->getParent(1), "No bias detected to produce the fuseMulAdd recipe.");
-        bias = addNode->getParent(1);
-    }
-    else if (addNode->getParent(1) == matmulNode) {
-        AIDGE_ASSERT(addNode->getParent(0), "No bias detected to produce the fuseMulAdd recipe.");
-        bias = addNode->getParent(0);
+    if (addNode) {
+        if (addNode->getParent(0) == matmulNode) {
+            AIDGE_ASSERT(addNode->getParent(1), "No bias detected to produce the matMulToFC recipe.");
+            bias = addNode->getParent(1);
+        }
+        else if (addNode->getParent(1) == matmulNode) {
+            AIDGE_ASSERT(addNode->getParent(0), "No bias detected to produce the matMulToFC recipe.");
+            bias = addNode->getParent(0);
+        }
     }
 
     std::shared_ptr<Node> weight = nullptr;
@@ -75,24 +76,9 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
     }
     AIDGE_ASSERT(weight != nullptr, "Could not deduce weight input for MatMul operator.");
 
-    // TODO: find another way to get OutChannels for FC operator.
-    // This poor fix supposes that one of Add inputs is a const and has the same outChannels as the output
-    DimSize_t outSize = 0;
-    AIDGE_ASSERT(addNode->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
-    const auto& op = std::static_pointer_cast<OperatorTensor>(addNode->getOperator());
-    for (size_t i = 0; i < op->nbInputs(); i++)
-    {
-        const auto& inTensor = op->getInput(i);
-        if(inTensor->nbDims() > 0) {
-            outSize = inTensor->dims()[inTensor->nbDims()-1];
-            break;
-        }
-    }
-    AIDGE_ASSERT(outSize, "Could not get output number of channels for FC operator.");
-
     // Instanciate FC
     std::string fcName = matmulNode->name();
-    if (!addNode->name().empty()) {
+    if (addNode && !addNode->name().empty()) {
         fcName += "_" + addNode->name();
     }
 
@@ -105,43 +91,26 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
         bias->cloneSharedOperators()->addChild(fc, 0, 2);
     }
 
-
     // Step 3 : Update all graphviews that contains at least one node to replace
         // Case 1 : If all nodes are in a graph view : delete old nodes & branch input & output
         // Case 2 : If not all nodes are in a graph view : only delete the nodes from the graphview
         // Maybe create a central mechanism to update automatically all graph views rather than each node have graphview presence memory?
-    auto newNodes = std::set<std::shared_ptr<Node>>({fc, fc->getParent(1), fc->getParent(2)});
-    GraphView::replace({matmulNode, addNode, bias, weight}, newNodes);
-
-}
-
-
-void Aidge::fuseMulAdd(std::shared_ptr<Aidge::MatchSolution> solution){
-
-    assert(solution->at("MatMul").size() == 1 && "Wrong number of nodes MatMul to replace\n");
-    assert(solution->at("Add").size() == 1 && "Wrong number of nodes Add to replace\n");
-
-    for (const auto& matmulNode : solution->at("MatMul")) {
-        for (const auto& addNode : solution->at("Add")) {
-            fuseMulAdd(matmulNode,addNode);
-        }
+    if (addNode) {
+        auto newNodes = std::set<std::shared_ptr<Node>>({fc, fc->getParent(1), fc->getParent(2)});
+        GraphView::replace({matmulNode, addNode, bias, weight}, newNodes);
+    }
+    else {
+        auto newNodes = std::set<std::shared_ptr<Node>>({fc, fc->getParent(1)});
+        GraphView::replace({matmulNode, weight}, newNodes);
     }
-}
-
-
-void Aidge::fuseMulAdd(std::shared_ptr<Aidge::GraphView> graphView){
-
-
-    std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
-    regex->setNodeKey("Add","getType($) =='Add'");
-    regex->setNodeKey("MatMul","getType($) =='MatMul'");
-    regex->addQuery("MatMul -> Add ;");
-
-    for (const auto& solution : regex->match(graphView)) {
-
-        fuseMulAdd(solution);
 
+}
 
+void Aidge::matMulToFC(std::shared_ptr<Aidge::GraphView> graphView){
+    const auto matches = SinglePassGraphMatching(graphView).match("MatMul->Add#?");
 
+    for (const auto& match : matches) {
+        const auto it = match.anchors.find("Add");
+        matMulToFC(match.graph->rootNode(), (it != match.anchors.end()) ? it->second.at("#") : nullptr);
     }
 }
diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp
index fbbc3f766857f15af0da8004c35078993d71e973..e05e105d34a981e33cc1a0baaffa2702f1f6bbbb 100644
--- a/unit_tests/graphRegex/Test_GraphRegex.cpp
+++ b/unit_tests/graphRegex/Test_GraphRegex.cpp
@@ -189,7 +189,7 @@ TEST_CASE("GraphRegexUser") {
         kitchenBook->setNodeKey("Flatten","getType($) =='Flatten'");
         kitchenBook->setNodeKey("FC","getType($) =='FC'");
 
-        kitchenBook->addQuery("MatMul->Add",static_cast<void(*)(std::shared_ptr<MatchSolution>)>(fuseMulAdd));
+        //kitchenBook->addQuery("MatMul->Add",static_cast<void(*)(std::shared_ptr<MatchSolution>)>(fuseMulAdd));
         kitchenBook->addQuery("Flatten->FC",static_cast<void(*)(std::shared_ptr<MatchSolution>)>(removeFlatten));
 
         kitchenBook->appliedRecipes(g);
diff --git a/unit_tests/operator/Test_DepthToSpaceImpl.cpp b/unit_tests/operator/Test_DepthToSpaceImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..62f760ce8b3942ab3101ff5e1324307a46048b91
--- /dev/null
+++ b/unit_tests/operator/Test_DepthToSpaceImpl.cpp
@@ -0,0 +1,87 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/DepthToSpace.hpp"
+
+
+namespace Aidge {
+
+TEST_CASE("[core/operator] DepthToSpace_Op", "[DepthToSpace][forwardDims]") {
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+
+    SECTION("Nb dimensions") {
+        // Create DepthToSpace operator with block_size of 1 compatible with any size
+        std::shared_ptr<Node> myDTS = DepthToSpace(1);
+        auto op = std::static_pointer_cast<OperatorTensor>(myDTS -> getOperator());
+
+        SECTION("Scalar") {
+            // input_0
+            std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(9);
+            op -> associateInput(0,T0);
+            REQUIRE_THROWS(op->forwardDims());
+        }
+        SECTION("+1-D") {
+            // input_0
+            std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+            op -> associateInput(0,T0);
+
+            for (std::uint16_t nb_dims = 0; nb_dims < 6; ++nb_dims) {
+
+                std::vector<std::size_t> dims0(nb_dims);
+                for (std::size_t i = 0; i < nb_dims; ++i) {
+                    dims0[i] = dimsDist(gen);
+                }
+                T0->resize(dims0);
+                if (nb_dims == 4) {
+                    REQUIRE_NOTHROW(op->forwardDims());
+                } else {
+                    REQUIRE_THROWS(op->forwardDims());
+                }
+            }
+        }
+    }
+
+    SECTION("Propagation") {
+        // input_0 with 4-D in NCHW format
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(std::vector<DimSize_t>({1, 16, 100, 100}));
+
+        DepthToSpace_Op myDTS_should_throw = DepthToSpace_Op(7);
+        myDTS_should_throw.associateInput(0,T0);
+
+        REQUIRE_THROWS(myDTS_should_throw.forwardDims());
+
+        DepthToSpace_Op myDTS_should_not_throw = DepthToSpace_Op(4);
+        myDTS_should_not_throw.associateInput(0,T0);
+
+        REQUIRE_NOTHROW(myDTS_should_not_throw.forwardDims());
+        REQUIRE(myDTS_should_not_throw.getOutput(0)->dims() == std::vector<std::size_t>({1,1,400,400}));
+    }
+}
+
+TEST_CASE("[core/operator] DepthToSpace_Op impl", "[DepthToSpace][forward]") {
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+
+
+}
+
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_GridSample_Op.cpp b/unit_tests/operator/Test_GridSample_Op.cpp
index 754cdb705dcbc5115af32bb0994fbb08ba633c3f..ae38ec7083a0df49fb241509bf52895765ddb0e8 100644
--- a/unit_tests/operator/Test_GridSample_Op.cpp
+++ b/unit_tests/operator/Test_GridSample_Op.cpp
@@ -33,7 +33,7 @@ TEST_CASE("[core/operator] GridSample_Op(forwardDims)", "[GridSample][forwardDim
     std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
 
     // Create GridSample Operator
-    std::shared_ptr<Node> myGridSample = GridSample<2>(GridSample_Op<2>::Mode::Cubic, GridSample_Op<2>::PaddingMode::Border, false);
+    std::shared_ptr<Node> myGridSample = GridSample(GridSample_Op::Mode::Cubic, GridSample_Op::PaddingMode::Border, false);
     auto op = std::static_pointer_cast<OperatorTensor>(myGridSample -> getOperator());
 
     // input_0
diff --git a/unit_tests/recipes/Test_FuseMulAdd.cpp b/unit_tests/recipes/Test_FuseMulAdd.cpp
deleted file mode 100644
index 9ea151039f07e5c688572d61b746d8fc26f1c3fe..0000000000000000000000000000000000000000
--- a/unit_tests/recipes/Test_FuseMulAdd.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <catch2/catch_test_macros.hpp>
-#include <set>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/graph/GraphView.hpp"
-#include "aidge/operator/Add.hpp"
-#include "aidge/operator/FC.hpp"
-#include "aidge/operator/MatMul.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/recipes/Recipes.hpp"
-
-namespace Aidge {
-
-
-TEST_CASE("[cpu/recipes] FuseMulAdd", "[FuseMulAdd][recipes]") {
-    // generate the original GraphView
-    auto matmul0 = MatMul("matmul0");
-    auto add0 = Add(2, "add0");
-    auto matmul1 = MatMul("matmul1");
-    auto add1 = Add(2, "add1");
-
-    auto b0 = Producer({5}, "B0");
-    auto w0 = Producer({5, 5}, "W0");
-    auto b1 = Producer({5}, "B1");
-    auto w1 = Producer({5,5},"W1");
-    auto input = Producer({2,5}, "input");
-
-    input->addChild(matmul0, 0, 0);
-    w0->addChild(matmul0, 0, 1);
-
-    matmul0->addChild(add0, 0, 0);
-    b0->addChild(add0, 0, 1);
-
-    add0->addChild(matmul1, 0, 1);
-    w1->addChild(matmul1, 0, 0);
-
-    matmul1->addChild(add1, 0, 0);
-    b1->addChild(add1, 0, 1);
-
-    auto g = std::make_shared<GraphView>();
-    g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1});
-
-    // Check original graph
-    REQUIRE(g->getNodes() ==
-            std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
-    REQUIRE(((matmul0->getParent(0) == input) && (matmul0->getParent(1) == w0)));
-    REQUIRE(((add0->getParent(0) == matmul0) && (add0->getParent(1) == b0)));
-    REQUIRE(((matmul1->getParent(1) == add0) && (matmul1->getParent(0) == w1)));
-    REQUIRE(((add1->getParent(0) == matmul1) && (add1->getParent(1) == b1)));
-
-	// Transform GraphView inplace
-    fuseMulAdd(g);
-
-	// Check new GraphView
-	std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
-	REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
-	REQUIRE(newNodes.size() == 6);
-	for (const auto& node : newNodes) {
-		REQUIRE(((node->type() == "Producer") || (node->type() == "FC")));
-	}
-}
-
-}  // namespace Aidge
diff --git a/unit_tests/recipes/Test_MatMulToFC.cpp b/unit_tests/recipes/Test_MatMulToFC.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2adf882ca69e0d5ca5f050d1b89cfb09d81b536b
--- /dev/null
+++ b/unit_tests/recipes/Test_MatMulToFC.cpp
@@ -0,0 +1,118 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <set>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/recipes/Recipes.hpp"
+
+namespace Aidge {
+
+
+TEST_CASE("[cpu/recipes] MatMulToFC", "[MatMulToFC][recipes]") {
+    SECTION("with Add") {
+        // generate the original GraphView
+        auto matmul0 = MatMul("matmul0");
+        auto add0 = Add(2, "add0");
+        auto matmul1 = MatMul("matmul1");
+        auto add1 = Add(2, "add1");
+
+        auto b0 = Producer({5}, "B0");
+        auto w0 = Producer({5, 5}, "W0");
+        auto b1 = Producer({5}, "B1");
+        auto w1 = Producer({5,5},"W1");
+        auto input = Producer({2,5}, "input");
+
+        input->addChild(matmul0, 0, 0);
+        w0->addChild(matmul0, 0, 1);
+
+        matmul0->addChild(add0, 0, 0);
+        b0->addChild(add0, 0, 1);
+
+        add0->addChild(matmul1, 0, 1);
+        w1->addChild(matmul1, 0, 0);
+
+        matmul1->addChild(add1, 0, 0);
+        b1->addChild(add1, 0, 1);
+
+        auto g = std::make_shared<GraphView>();
+        g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1});
+
+        // Check original graph
+        REQUIRE(g->getNodes() ==
+                std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
+        REQUIRE(((matmul0->getParent(0) == input) && (matmul0->getParent(1) == w0)));
+        REQUIRE(((add0->getParent(0) == matmul0) && (add0->getParent(1) == b0)));
+        REQUIRE(((matmul1->getParent(1) == add0) && (matmul1->getParent(0) == w1)));
+        REQUIRE(((add1->getParent(0) == matmul1) && (add1->getParent(1) == b1)));
+
+        // Transform GraphView in place
+        matMulToFC(g);
+
+        // Check new GraphView
+        std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
+        REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, b0, add0, w1, matmul1, b1, add1}));
+        REQUIRE(newNodes.size() == 6);
+        for (const auto& node : newNodes) {
+            REQUIRE(((node->type() == "Producer") || (node->type() == "FC")));
+        }
+    }
+
+    SECTION("without Add") {
+        // generate the original GraphView
+        auto matmul0 = MatMul("matmul0");
+        auto matmul1 = MatMul("matmul1");
+        auto add1 = Add(2, "add1");
+
+        auto w0 = Producer({5, 5}, "W0");
+        auto b1 = Producer({5}, "B1");
+        auto w1 = Producer({5,5},"W1");
+        auto input = Producer({2,5}, "input");
+
+        input->addChild(matmul0, 0, 0);
+        w0->addChild(matmul0, 0, 1);
+
+        matmul0->addChild(matmul1, 0, 1);
+        w1->addChild(matmul1, 0, 0);
+
+        matmul1->addChild(add1, 0, 0);
+        b1->addChild(add1, 0, 1);
+
+        auto g = std::make_shared<GraphView>();
+        g->add({w0, matmul0, w1, matmul1, b1, add1});
+
+        // Check original graph
+        REQUIRE(g->getNodes() ==
+                std::set<std::shared_ptr<Node>>({w0, matmul0, w1, matmul1, b1, add1}));
+        REQUIRE(((matmul0->getParent(0) == input) && (matmul0->getParent(1) == w0)));
+        REQUIRE(((matmul1->getParent(1) == matmul0) && (matmul1->getParent(0) == w1)));
+        REQUIRE(((add1->getParent(0) == matmul1) && (add1->getParent(1) == b1)));
+
+        // Transform GraphView in place
+        matMulToFC(g);
+
+        // Check new GraphView
+        std::set<std::shared_ptr<Node>> newNodes = g->getNodes();
+        REQUIRE(newNodes != std::set<std::shared_ptr<Node>>({w0, matmul0, w1, matmul1, b1, add1}));
+        REQUIRE(newNodes.size() == 5);
+        for (const auto& node : newNodes) {
+            REQUIRE(((node->type() == "Producer") || (node->type() == "FC")));
+        }
+    }
+}
+
+}  // namespace Aidge