diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 3ee64cecaf9b139ec0a7ef9a0fa4acc5b06f57c7..89d7a3a7b0c4d164473869a9d6372c3bf48cd308 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -449,12 +449,16 @@ public:
      */
     constexpr inline const std::vector<DimSize_t>& dims() const noexcept { return mDims; }
 
+    inline DimSize_t dim(DimIdx_t idx) const { return mDims[idx]; }
+
     /**
      * @brief Get strides of the Tensor object.
      * @return constexpr const std::vector<DimSize_t>&
      */
     constexpr inline const std::vector<DimSize_t>& strides() const noexcept { return mStrides; }
 
+    inline DimSize_t stride(DimIdx_t idx) const { return mStrides[idx]; }
+
     /**
      * @brief Return true if Tensor is contiguous in memory.
      * @return bool
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..72ff83834962c1860b135a4187e72199b04361db
--- /dev/null
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -0,0 +1,95 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
+#define AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
+
+#include <array>
+#include <memory>
+#include <vector>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+class DepthToSpace_OpImpl : public OperatorImpl {
+public:
+    DepthToSpace_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    void forward() override;
+};
+
+enum class DepthToSpaceAttr { BlockSize, Mode };
+
+
+class DepthToSpace_Op : public OperatorTensor,
+                public Registrable<DepthToSpace_Op,
+                    std::string,
+                    std::shared_ptr<OperatorImpl>(const DepthToSpace_Op &)> {
+public:
+    static const std::string Type;
+    enum class Mode { DCR, CRD };
+
+private:
+    using Attributes_ = StaticAttributes<DepthToSpaceAttr, std::uint32_t, Mode>;
+    template <DepthToSpaceAttr e>
+    using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
+
+    DepthToSpace_Op() = delete;
+
+    DepthToSpace_Op(const std::uint32_t blockSize, const Mode mode = Mode::CRD);
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s),
+     * but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    DepthToSpace_Op(const DepthToSpace_Op& op);
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::DepthToSpace_Op
+     */
+    std::shared_ptr<Operator> clone() const override;
+
+    bool forwardDims(bool /*allowDataDependency*/ = false) override final;
+
+    void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
+
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::uint32_t& blockSize() const { return mAttributes->template getAttr<DepthToSpaceAttr::BlockSize>(); }
+    inline Mode& mode() const { return mAttributes->template getAttr<DepthToSpaceAttr::Mode>(); }
+
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName() {
+        return {"data_output"};
+    }
+};
+
+std::shared_ptr<Node> DepthToSpace(const std::uint32_t blockSize,
+                                    const DepthToSpace_Op::Mode mode = DepthToSpace_Op::Mode::CRD,
+                                    const std::string& name = "");
+
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = { "block_size", "mode" };
+}
+
+#endif //AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index af44a5df5de6908d58951b93921d49ec8e7df708..81900824ed0d26572e593982fa21ed900eda88ee 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -27,15 +27,14 @@ namespace Aidge {
 
 enum class GridSampleAttr { Mode, PaddingMode, AlignCorners };
 
-template <DimIdx_t DIM>
 class GridSample_Op : public OperatorTensor,
-	public Registrable<GridSample_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const GridSample_Op<DIM>&)> {
+	public Registrable<GridSample_Op, std::string, std::shared_ptr<OperatorImpl>(const GridSample_Op&)> {
 
 public:
 	static const std::string Type;
 
 	enum class Mode { Linear, Nearest, Cubic };
-	enum class PaddingMode { Zeros, Border, Reflexion };
+	enum class PaddingMode { Zeros, Border, Reflection };
 
 private:
 	using Attributes_ = StaticAttributes<GridSampleAttr, Mode, PaddingMode, bool>;
@@ -49,7 +48,7 @@ public:
 			PaddingMode paddingMode = PaddingMode::Zeros,
 			bool alignCorners = false);
 
-	GridSample_Op(const GridSample_Op<DIM>& other);
+	GridSample_Op(const GridSample_Op& other);
 	~GridSample_Op() noexcept;
 
 public:
@@ -63,7 +62,7 @@ public:
 	inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 	inline Mode mode() const { return mAttributes->template getAttr<GridSampleAttr::Mode>(); }
 	inline PaddingMode paddingMode() const { return mAttributes->template getAttr<GridSampleAttr::PaddingMode>(); }
-	inline bool alignBorders() const { return mAttributes->template getAttr<GridSampleAttr::AlignCorners>(); }
+	inline bool alignCorners() const { return mAttributes->template getAttr<GridSampleAttr::AlignCorners>(); }
 
 	static const std::vector<std::string> getInputsName() {
 		return {"data_input", "grid_field"};
@@ -73,13 +72,9 @@ public:
 	}
 };
 
-extern template class GridSample_Op<1>;
-extern template class GridSample_Op<2>;
-
-template <DimIdx_t DIM>
 std::shared_ptr<Node> GridSample(
-                        typename GridSample_Op<DIM>::Mode mode = GridSample_Op<DIM>::Mode::Linear,
-                        typename GridSample_Op<DIM>::PaddingMode paddingMode = GridSample_Op<DIM>::PaddingMode::Zeros,
+                        typename GridSample_Op::Mode mode = GridSample_Op::Mode::Linear,
+                        typename GridSample_Op::PaddingMode paddingMode = GridSample_Op::PaddingMode::Zeros,
                         bool alignCorners = false,
                         const std::string& name = "");
 
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index e33abcaebc02e8bcdd002efb7c2d8fe45d883906..205c9f966b7d7cf984dd591daf110d1304216ec0 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -98,10 +98,6 @@ void removeFlatten(std::shared_ptr<GraphView> graphView);
  */
 void fuseBatchNorm(std::shared_ptr<Node> conv,std::shared_ptr<Node> batchnorm);
 
-
-
-void fuseBatchNorm(std::shared_ptr<MatchSolution> solution);
-
 /**
  * @brief Fuse :cpp:function:`Aidge::BatchNorm` with :cpp:function:`Aidge::Conv` or :cpp:function:`Aidge::FC` Nodes.
  * Ref: https://nenadmarkus.com/p/fusing-batchnorm-and-conv/
diff --git a/python_binding/operator/pybind_GridSample.cpp b/python_binding/operator/pybind_GridSample.cpp
index 34d1ff295093dd35fe5e71cdf335e6147d08194e..49e74f4cbab90f141af5e76df7fbdef6e3794146 100644
--- a/python_binding/operator/pybind_GridSample.cpp
+++ b/python_binding/operator/pybind_GridSample.cpp
@@ -22,58 +22,51 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/Registrar.hpp" // declare_registrable
 
-template <std::size_t DIM>
-static typename Aidge::GridSample_Op<DIM>::Mode stringToInterpolationMode(const std::string& mode) {
-    static std::unordered_map<std::string, typename Aidge::GridSample_Op<DIM>::Mode> map = {
-        {"linear", Aidge::GridSample_Op<DIM>::Mode::Linear},
-        {"nearest", Aidge::GridSample_Op<DIM>::Mode::Nearest},
-        {"cubic", Aidge::GridSample_Op<DIM>::Mode::Cubic}
+
+static typename Aidge::GridSample_Op::Mode stringToInterpolationMode(const std::string& mode) {
+    static std::unordered_map<std::string, typename Aidge::GridSample_Op::Mode> map = {
+        {"linear", Aidge::GridSample_Op::Mode::Linear},
+        {"nearest", Aidge::GridSample_Op::Mode::Nearest},
+        {"cubic", Aidge::GridSample_Op::Mode::Cubic}
     };
     return map[mode];
 }
 
-template Aidge::GridSample_Op<1>::Mode stringToInterpolationMode<1>(const std::string&);
-template Aidge::GridSample_Op<2>::Mode stringToInterpolationMode<2>(const std::string&);
-
-template <std::size_t DIM>
-static typename Aidge::GridSample_Op<DIM>::PaddingMode stringToPaddingMode(const std::string& mode) {
-    static std::unordered_map<std::string, typename Aidge::GridSample_Op<DIM>::PaddingMode> map = {
-        {"zeros", Aidge::GridSample_Op<DIM>::PaddingMode::Zeros},
-        {"border", Aidge::GridSample_Op<DIM>::PaddingMode::Border},
-        {"reflexion", Aidge::GridSample_Op<DIM>::PaddingMode::Reflexion}
+static typename Aidge::GridSample_Op::PaddingMode stringToPaddingMode(const std::string& mode) {
+    static std::unordered_map<std::string, typename Aidge::GridSample_Op::PaddingMode> map = {
+        {"zeros", Aidge::GridSample_Op::PaddingMode::Zeros},
+        {"border", Aidge::GridSample_Op::PaddingMode::Border},
+        {"reflection", Aidge::GridSample_Op::PaddingMode::Reflection}
     };
     return map[mode];
 }
 
-template Aidge::GridSample_Op<1>::PaddingMode stringToPaddingMode<1>(const std::string&);
-template Aidge::GridSample_Op<2>::PaddingMode stringToPaddingMode<2>(const std::string&);
-
 namespace py = pybind11;
 namespace Aidge {
 
-template <DimIdx_t DIM> void declare_GridSampleOp(py::module &m) {
-  const std::string pyClassName("GridSampleOp" + std::to_string(DIM) + "D");
-  py::class_<GridSample_Op<DIM>, std::shared_ptr<GridSample_Op<DIM>>, OperatorTensor>(
+void declare_GridSampleOp(py::module &m) {
+  const std::string pyClassName("GridSampleOp");
+  py::class_<GridSample_Op, std::shared_ptr<GridSample_Op>, OperatorTensor>(
     m, pyClassName.c_str(),
     py::multiple_inheritance())
         .def(py::init([](const std::string& mode,
                          const std::string& padding_mode,
                          bool align_corners) {
-            return new GridSample_Op<DIM>(stringToInterpolationMode<DIM>(mode), stringToPaddingMode<DIM>(padding_mode), align_corners);
+            return new GridSample_Op(stringToInterpolationMode(mode), stringToPaddingMode(padding_mode), align_corners);
         }), py::arg("mode") = "linear",
             py::arg("padding_mode") = "zeros",
            py::arg("align_corners") = false)
-        .def_static("get_inputs_name", &GridSample_Op<DIM>::getInputsName)
-        .def_static("get_outputs_name", &GridSample_Op<DIM>::getOutputsName)
+        .def_static("get_inputs_name", &GridSample_Op::getInputsName)
+        .def_static("get_outputs_name", &GridSample_Op::getOutputsName)
         ;
 
-  declare_registrable<GridSample_Op<DIM>>(m, pyClassName);
+  declare_registrable<GridSample_Op>(m, pyClassName);
 
-  m.def(("GridSample" + std::to_string(DIM) + "D").c_str(), [](const std::string& mode,
-                                                        const std::string& padding_mode,
-                                                        bool align_corners,
-                                                        const std::string& name) {
-        return GridSample<DIM>(stringToInterpolationMode<DIM>(mode), stringToPaddingMode<DIM>(padding_mode), align_corners, name);
+  m.def("GridSample", [](const std::string& mode,
+                        const std::string& padding_mode,
+                        bool align_corners,
+                        const std::string& name) {
+        return GridSample(stringToInterpolationMode(mode), stringToPaddingMode(padding_mode), align_corners, name);
     }, py::arg("mode"),
        py::arg("padding_mode"),
        py::arg("align_corners"),
@@ -82,9 +75,7 @@ template <DimIdx_t DIM> void declare_GridSampleOp(py::module &m) {
 
 
 void init_GridSample(py::module &m) {
-  declare_GridSampleOp<1>(m);
-  declare_GridSampleOp<2>(m);
-//   declare_GridSampleOp<3>(m);
+  declare_GridSampleOp(m);
 }
 
 } // namespace Aidge
diff --git a/src/operator/DepthToSpace.cpp b/src/operator/DepthToSpace.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0c858548ec484c34a651efa4adec1cde7ccb9e54
--- /dev/null
+++ b/src/operator/DepthToSpace.cpp
@@ -0,0 +1,122 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/operator/DepthToSpace.hpp"
+
+#include <array>
+#include <cstddef>  // std::size_t
+#include <string>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+#include "aidge/utils/Types.h"
+
+void Aidge::DepthToSpace_OpImpl::forward() {
+    const DepthToSpace_Op& op = dynamic_cast<const DepthToSpace_Op&>(mOp);
+    // suppose an NCHW Tensor format
+
+    // Get input dimensions
+    const auto& dims = op.getInput(0)->dims<4>();
+    // get final output dimension
+    const std::array<DimSize_t, 4> final_dims = op.getOutput(0)->dims<4>();
+
+    std::size_t b = dims[0];
+    std::size_t c = dims[1] / (static_cast<DimSize_t>(op.blockSize()) * static_cast<DimSize_t>(op.blockSize()));
+    std::size_t h = dims[2];
+    std::size_t w = dims[3];
+
+    // Copy input tensor to output
+    op.setOutput(0, op.getInput(0));
+
+    // Step 1: Resize
+    const std::vector<DimSize_t> resize_dims =
+        (op.mode() == DepthToSpace_Op::Mode::CRD) ?
+            std::vector<DimSize_t>({b, c, static_cast<DimSize_t>(op.blockSize()), static_cast<DimSize_t>(op.blockSize()), h, w}) :
+            std::vector<DimSize_t>({b, static_cast<DimSize_t>(op.blockSize()), static_cast<DimSize_t>(op.blockSize()), c, h, w});
+    op.getOutput(0)->resize(resize_dims);
+
+    // Step 2: Transpose
+    const std::vector<DimSize_t> transpose_order =
+        (op.mode() == DepthToSpace_Op::Mode::CRD) ?
+            std::vector<DimSize_t>({0, 1, 4, 2, 5, 3}) :
+            std::vector<DimSize_t>({0, 3, 4, 1, 5, 2});
+    op.getOutput(0)->copyTranspose(*(op.getOutput(0)), transpose_order);
+
+    // Step 3: Final resize
+    op.getOutput(0)->resize(final_dims);
+}
+
+//////////////////////////////////////////////////////
+
+const std::string Aidge::DepthToSpace_Op::Type = "DepthToSpace";
+
+Aidge::DepthToSpace_Op::DepthToSpace_Op(const std::uint32_t blockSize, const Aidge::DepthToSpace_Op::Mode mode)
+    : OperatorTensor(Type, {InputCategory::Data}, 1),
+        mAttributes(std::make_shared<Attributes_>(
+        attr<DepthToSpaceAttr::BlockSize>(blockSize),
+        attr<DepthToSpaceAttr::Mode>(mode)))
+{
+    // ctor
+}
+
+Aidge::DepthToSpace_Op::DepthToSpace_Op(const Aidge::DepthToSpace_Op& op)
+    : OperatorTensor(op),
+      mAttributes(op.mAttributes)
+{
+    if (op.mImpl) {
+        SET_IMPL_MACRO(DepthToSpace_Op, *this, op.backend());
+    } else {
+        mImpl = nullptr;
+    }
+}
+
+std::shared_ptr<Aidge::Operator> Aidge::DepthToSpace_Op::clone() const {
+    return std::make_shared<DepthToSpace_Op>(*this);
+}
+
+bool Aidge::DepthToSpace_Op::forwardDims(bool /*allowDataDependency*/) {
+    if (inputsAssociated()) {
+        AIDGE_ASSERT(getInput(0)->nbDims() == 4, "{} Operator only accepts 4-D input Tensors.", DepthToSpace_Op::Type);
+        AIDGE_ASSERT(getInput(0)->dims()[1] % (blockSize() * blockSize()) == 0, "Number of channels must be divisible by blocksize squared");
+
+        // Compute output dims
+        const std::array<DimSize_t, 4>& inDims = getInput(0)->dims<4>();
+        const std::vector<DimSize_t> outDims =
+                {inDims[0],
+                 inDims[1] / (static_cast<DimSize_t>(blockSize()) * static_cast<DimSize_t>(blockSize())),
+                 inDims[2] * static_cast<DimSize_t>(blockSize()),
+                 inDims[3] * static_cast<DimSize_t>(blockSize())};
+
+        mOutputs[0]->resize(outDims);
+        return true;
+    }
+
+    return false;
+}
+
+void Aidge::DepthToSpace_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
+    if (Registrar<DepthToSpace_Op>::exists({name})) {
+        SET_IMPL_MACRO(DepthToSpace_Op, *this, name);
+    }
+    else {
+        mImpl = std::make_shared<DepthToSpace_OpImpl>(*this);
+    }
+    mOutputs[0]->setBackend(name, device);
+}
+
+//////////////////////////////////////////////////////////
+
+std::shared_ptr<Aidge::Node> Aidge::DepthToSpace(const std::uint32_t blockSize,
+                                    const Aidge::DepthToSpace_Op::Mode mode,
+                                    const std::string& name) {
+    return std::make_shared<Node>(std::make_shared<DepthToSpace_Op>(blockSize, mode), name);
+}
\ No newline at end of file
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
index 997348d406db6ed4393362941099c93b03d5b9e8..1a2ec88bbfb2bfed134e779619a0a3f0604ce155 100644
--- a/src/operator/Fold.cpp
+++ b/src/operator/Fold.cpp
@@ -97,4 +97,4 @@ std::shared_ptr<Aidge::Node> Aidge::Fold(const std::array<Aidge::DimSize_t, DIM>
     return std::make_shared<Node>(std::make_shared<Fold_Op<static_cast<DimIdx_t>(DIM)>>(outputDims, kernelDims, strideDims, dilationDims), name);
 }
 
-template std::shared_ptr<Aidge::Node> Aidge::Fold<2>(const std::array<Aidge::DimSize_t, 2> &outputDims, const std::array<Aidge::DimSize_t, 2> &kernelDims, const std::string& name, const std::array<Aidge::DimSize_t, 2> &strideDims, const std::array<Aidge::DimSize_t, 2> &dilationDims);
+template std::shared_ptr<Aidge::Node> Aidge::Fold<2>(const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&, const std::string&, const std::array<Aidge::DimSize_t, 2>&, const std::array<Aidge::DimSize_t, 2>&);
\ No newline at end of file
diff --git a/src/operator/GridSample.cpp b/src/operator/GridSample.cpp
index 6cc0ad7462886e5491ba697dbbe868a5f47e4dd4..fa1efc75a4c0a85717343ce4fcdea1a8adcfb4e7 100644
--- a/src/operator/GridSample.cpp
+++ b/src/operator/GridSample.cpp
@@ -21,13 +21,13 @@
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
-template <Aidge::DimIdx_t DIM>
-const std::string Aidge::GridSample_Op<DIM>::Type = "GridSample";
 
-template <Aidge::DimIdx_t DIM>
-Aidge::GridSample_Op<DIM>::GridSample_Op(
-    typename Aidge::GridSample_Op<DIM>::Mode mode,
-    typename Aidge::GridSample_Op<DIM>::PaddingMode paddingMode,
+const std::string Aidge::GridSample_Op::Type = "GridSample";
+
+
+Aidge::GridSample_Op::GridSample_Op(
+    typename Aidge::GridSample_Op::Mode mode,
+    typename Aidge::GridSample_Op::PaddingMode paddingMode,
     bool alignCorners)
     : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param}, 1),
       mAttributes(std::make_shared<Attributes_>(
@@ -38,46 +38,47 @@ Aidge::GridSample_Op<DIM>::GridSample_Op(
     // ctor
 }
 
-template <Aidge::DimIdx_t DIM>
-Aidge::GridSample_Op<DIM>::GridSample_Op(const Aidge::GridSample_Op<DIM>& other)
+
+Aidge::GridSample_Op::GridSample_Op(const Aidge::GridSample_Op& other)
     : OperatorTensor(other),
       mAttributes(other.mAttributes)
 {
     if (other.mImpl) {
-        SET_IMPL_MACRO(GridSample_Op<DIM>, *this, other.backend());
+        SET_IMPL_MACRO(GridSample_Op, *this, other.backend());
     } else {
         mImpl = nullptr;
     }
 }
 
-template <Aidge::DimIdx_t DIM>
-Aidge::GridSample_Op<DIM>::~GridSample_Op() noexcept = default;
 
-template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::GridSample_Op<DIM>::clone() const {
-    return std::make_shared<GridSample_Op<DIM>>(*this);
+Aidge::GridSample_Op::~GridSample_Op() noexcept = default;
+
+
+std::shared_ptr<Aidge::Operator> Aidge::GridSample_Op::clone() const {
+    return std::make_shared<GridSample_Op>(*this);
 }
 
-template <Aidge::DimIdx_t DIM>
-bool Aidge::GridSample_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
+
+bool Aidge::GridSample_Op::forwardDims(bool /*allowDataDependency*/) {
     // TODO: adapt for other formats than NCHW
     if (inputsAssociated()) {
         // check data has batch and channel dimensions: (N, C, D0, D1, ..., DN)
-        AIDGE_ASSERT((getInput(0)->nbDims() == (DIM+2)),
-                    "Wrong input size for {} operator.", type());
+        AIDGE_ASSERT(getInput(0)->nbDims() > 2, "Input should have at least one spatial dimension.");
+        const std::size_t nbSpatialFeat = getInput(0)->nbDims() - 2; // all except channels and batches
         // check grid field
         // should be (N, D0_out, D1_out, ..., DN_out, N+1)
-        AIDGE_ASSERT(((getInput(1)->nbDims() == (DIM+2)) &&
-            (getInput(1)->template dims<DIM+2>()[DIM+1] == DIM) &&
-            (getInput(1)->template dims<DIM+2>()[0] == getInput(0)->template dims<DIM+2>()[0])),
+        AIDGE_ASSERT(((getInput(1)->nbDims() == nbSpatialFeat + 2) &&
+            (getInput(1)->dims()[nbSpatialFeat+1] == nbSpatialFeat) &&
+            (getInput(1)->dims()[0] == getInput(0)->dims()[0])),
             "Wrong grid size {} for {} operator.", getInput(1)->dims(), type());
 
-        std::array<DimSize_t, DIM + 2> outputDims{};
+        std::vector<DimSize_t> outputDims{};
+        outputDims.reserve(nbSpatialFeat+2);
         const std::vector<DimSize_t>& inputDims(getInput(1)->dims());
-        outputDims[1] = getInput(0)->template dims<DIM+2>()[1];
-        outputDims[0] = inputDims[0];
-        for (std::size_t i = 2; i < DIM+2; ++i) {
-            outputDims[i] = inputDims[i-1];
+        outputDims.push_back(inputDims[0]);
+        outputDims.push_back(getInput(0)->dims()[1]);
+        for (std::size_t i = 2; i < nbSpatialFeat+2; ++i) {
+            outputDims.push_back(inputDims[i-1]);
         }
 
         mOutputs[0]->resize(outputDims);
@@ -88,31 +89,26 @@ bool Aidge::GridSample_Op<DIM>::forwardDims(bool /*allowDataDependency*/) {
 }
 
 
-template <Aidge::DimIdx_t DIM>
-void Aidge::GridSample_Op<DIM>::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
-    SET_IMPL_MACRO(GridSample_Op<DIM>, *this, name);
+
+void Aidge::GridSample_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
+    SET_IMPL_MACRO(GridSample_Op, *this, name);
     mOutputs[0]->setBackend(name, device);
 }
 
-template class Aidge::GridSample_Op<1>;
-template class Aidge::GridSample_Op<2>;
 
 ////////////////////////////////////////////////
 
-template <Aidge::DimIdx_t DIM>
+
 std::shared_ptr<Aidge::Node> Aidge::GridSample(
-                        typename Aidge::GridSample_Op<DIM>::Mode mode,
-                        typename Aidge::GridSample_Op<DIM>::PaddingMode paddingMode,
+                        typename Aidge::GridSample_Op::Mode mode,
+                        typename Aidge::GridSample_Op::PaddingMode paddingMode,
                         bool alignCorners,
                         const std::string& name)
 {
     return std::make_shared<Node>(
-        std::make_shared<GridSample_Op<DIM>>(
+        std::make_shared<GridSample_Op>(
                 mode,
                 paddingMode,
                 alignCorners),
             name);
 }
-
-template std::shared_ptr<Aidge::Node> Aidge::GridSample<1>(typename Aidge::GridSample_Op<1>::Mode, typename Aidge::GridSample_Op<1>::PaddingMode, bool, const std::string&);
-template std::shared_ptr<Aidge::Node> Aidge::GridSample<2>(typename Aidge::GridSample_Op<2>::Mode, typename Aidge::GridSample_Op<2>::PaddingMode, bool, const std::string&);
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index aa20a056ad789975c5b4d493a1ce48dcd7592946..e1553fda551795a0b6f0334ccf1dbd3d2b760085 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -16,6 +16,7 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/graph/Matching.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
@@ -25,9 +26,6 @@
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
-// Graph Regex
-#include "aidge/graphRegex/GraphRegex.hpp"
-
 void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
                           std::shared_ptr<Aidge::Node> batchnormNode) {
     // Case: convNode is a MetaOperator ending with a Convolution
@@ -191,44 +189,11 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
 
 }
 
-void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::MatchSolution> solution) {
-    assert(solution->at("BatchNorm").size() == 1 && "Wrong number of nodes BatchNorm to replace\n");
-    assert(solution->at("OP").size() == 1 && "Wrong number of nodes OP to replace\n");
-
-    for (const auto& op : solution->at("OP")) {
-        if (op->getOperator()->isAtomic()) {
-            for (const auto& batchNorm : solution->at("BatchNorm")) {
-                fuseBatchNorm(op, batchNorm);
-            }
-        } else {  // op is a MetaOperator
-            auto metaOp = std::dynamic_pointer_cast<MetaOperator_Op>(op->getOperator());
-            if ((metaOp->getMicroGraph()->getOrderedOutputs().size() == 1) &&
-                ((metaOp->getMicroGraph()->getOrderedOutputs()[0].first->type() ==
-                  Conv_Op<2>::Type) ||
-                 (metaOp->getMicroGraph()->getOrderedOutputs()[0].first->type() ==
-                  ConvDepthWise_Op<2>::Type))) {
-                for (const auto& batchNorm : solution->at("BatchNorm")) {
-                    fuseBatchNorm(op, batchNorm);
-                }
-            }
-        }
-    }
-}
-
 void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::GraphView> graphView) {
-    std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
-    regex->setNodeKey("BatchNorm", "getType($) =='BatchNorm'");
-    fmt::print("\n============================\nSearching for solutions\n==============================\n");
-    regex->setNodeKey(
-            "OP",
-            "getType($) =='Conv' || getType($) =='ConvDepthWise' || getType($) =='PaddedConv' || getType($) =='PaddedConvDepthWise'");
-            //  || getType($) =='FC' ");
-
-    regex->addQuery("OP -> BatchNorm");
-
-    for (const auto& solution : regex->match(graphView)) {
-
-        fuseBatchNorm(solution);
+    auto matches = SinglePassGraphMatching(graphView).match("(Conv|ConvDepthWise|PaddedConv|PaddedConvDepthWise)->BatchNorm");
 
+    for (const auto& match : matches) {
+        auto rootNode = match.graph->rootNode();
+        fuseBatchNorm(rootNode, *rootNode->getChildren().begin());
     }
-}
\ No newline at end of file
+}
diff --git a/unit_tests/operator/Test_DepthToSpaceImpl.cpp b/unit_tests/operator/Test_DepthToSpaceImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..62f760ce8b3942ab3101ff5e1324307a46048b91
--- /dev/null
+++ b/unit_tests/operator/Test_DepthToSpaceImpl.cpp
@@ -0,0 +1,87 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/DepthToSpace.hpp"
+
+
+namespace Aidge {
+
+TEST_CASE("[core/operator] DepthToSpace_Op", "[DepthToSpace][forwardDims]") {
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+
+    SECTION("Nb dimensions") {
+        // Create DepthToSpace operator with block_size of 1 compatible with any size
+        std::shared_ptr<Node> myDTS = DepthToSpace(1);
+        auto op = std::static_pointer_cast<OperatorTensor>(myDTS -> getOperator());
+
+        SECTION("Scalar") {
+            // input_0
+            std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(9);
+            op -> associateInput(0,T0);
+            REQUIRE_THROWS(op->forwardDims());
+        }
+        SECTION("+1-D") {
+            // input_0
+            std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+            op -> associateInput(0,T0);
+
+            for (std::uint16_t nb_dims = 0; nb_dims < 6; ++nb_dims) {
+
+                std::vector<std::size_t> dims0(nb_dims);
+                for (std::size_t i = 0; i < nb_dims; ++i) {
+                    dims0[i] = dimsDist(gen);
+                }
+                T0->resize(dims0);
+                if (nb_dims == 4) {
+                    REQUIRE_NOTHROW(op->forwardDims());
+                } else {
+                    REQUIRE_THROWS(op->forwardDims());
+                }
+            }
+        }
+    }
+
+    SECTION("Propagation") {
+        // input_0 with 4-D in NCHW format
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>(std::vector<DimSize_t>({1, 16, 100, 100}));
+
+        DepthToSpace_Op myDTS_should_throw = DepthToSpace_Op(7);
+        myDTS_should_throw.associateInput(0,T0);
+
+        REQUIRE_THROWS(myDTS_should_throw.forwardDims());
+
+        DepthToSpace_Op myDTS_should_not_throw = DepthToSpace_Op(4);
+        myDTS_should_not_throw.associateInput(0,T0);
+
+        REQUIRE_NOTHROW(myDTS_should_not_throw.forwardDims());
+        REQUIRE(myDTS_should_not_throw.getOutput(0)->dims() == std::vector<std::size_t>({1,1,400,400}));
+    }
+}
+
+TEST_CASE("[core/operator] DepthToSpace_Op impl", "[DepthToSpace][forward]") {
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+
+
+}
+
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_GridSample_Op.cpp b/unit_tests/operator/Test_GridSample_Op.cpp
index 754cdb705dcbc5115af32bb0994fbb08ba633c3f..ae38ec7083a0df49fb241509bf52895765ddb0e8 100644
--- a/unit_tests/operator/Test_GridSample_Op.cpp
+++ b/unit_tests/operator/Test_GridSample_Op.cpp
@@ -33,7 +33,7 @@ TEST_CASE("[core/operator] GridSample_Op(forwardDims)", "[GridSample][forwardDim
     std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
 
     // Create GridSample Operator
-    std::shared_ptr<Node> myGridSample = GridSample<2>(GridSample_Op<2>::Mode::Cubic, GridSample_Op<2>::PaddingMode::Border, false);
+    std::shared_ptr<Node> myGridSample = GridSample(GridSample_Op::Mode::Cubic, GridSample_Op::PaddingMode::Border, false);
     auto op = std::static_pointer_cast<OperatorTensor>(myGridSample -> getOperator());
 
     // input_0