diff --git a/aidge_core/unit_tests/test_recipies.py b/aidge_core/unit_tests/test_recipies.py
index 6cf89a45fd0d4cf1dc970d199d074e886b131896..26ae544d6e05f2f9a9da371d3617f9265a037364 100644
--- a/aidge_core/unit_tests/test_recipies.py
+++ b/aidge_core/unit_tests/test_recipies.py
@@ -20,6 +20,18 @@ class test_recipies(unittest.TestCase):
     def tearDown(self):
         pass
 
+    def test_remove_dropout(self):
+        graph_view = aidge_core.sequential([
+            aidge_core.GenericOperator("Conv", 1, 0, 1, name="Conv0"),
+            aidge_core.GenericOperator("Dropout", 1, 0, 1, name="Dropout0")
+        ])
+        old_nodes = graph_view.get_nodes()
+        aidge_core.remove_dropout(graph_view)
+        self.assertTrue(len(graph_view.get_nodes()) == len(old_nodes) - 1)
+        self.assertTrue("Dropout0" not in [i.name() for i in graph_view.get_nodes()])
+
+        self.assertTrue(all([i in old_nodes for i in graph_view.get_nodes()]))
+
     def test_remove_flatten(self):
         graph_view = aidge_core.sequential([
             aidge_core.GenericOperator("Flatten", 1, 0, 1, name="Flatten0"),
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 6782392a77159814c9c363e236e21b87ca5480d9..cc0979b07b07c2b95515eda09fda68a9ec4ac63e 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -35,7 +35,9 @@
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/operator/Div.hpp"
+#include "aidge/operator/Erf.hpp"
 #include "aidge/operator/FC.hpp"
+#include "aidge/operator/Gather.hpp"
 #include "aidge/operator/GenericOperator.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/operator/MaxPooling.hpp"
@@ -46,13 +48,15 @@
 #include "aidge/operator/Pad.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/operator/Pow.hpp"
+#include "aidge/operator/ReduceMean.hpp"
 #include "aidge/operator/ReLU.hpp"
+#include "aidge/operator/Reshape.hpp"
 #include "aidge/operator/Scaling.hpp"
 #include "aidge/operator/Slice.hpp"
 #include "aidge/operator/Softmax.hpp"
 #include "aidge/operator/Sqrt.hpp"
 #include "aidge/operator/Sub.hpp"
-
+#include "aidge/operator/Transpose.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
 
 #include "aidge/recipies/Recipies.hpp"
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6395756f3b08c5838d390ab45d38fa9c03cb91cb
--- /dev/null
+++ b/include/aidge/operator/Erf.hpp
@@ -0,0 +1,75 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_ERF_H_
+#define AIDGE_CORE_OPERATOR_ERF_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+class Erf_Op : public OperatorTensor,
+    public Registrable<Erf_Op, std::string, std::unique_ptr<OperatorImpl>(const Erf_Op&)> {
+public:
+    static const std::string Type;
+
+    Erf_Op() : OperatorTensor(Type, 1, 0, 1) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Erf_Op(const Erf_Op& op)
+        : OperatorTensor(op)
+    {
+        mImpl = op.mImpl ? Registrar<Erf_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Erf_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Erf_Op>(*this);
+    }
+
+    void setBackend(const std::string& name) override {
+        mImpl = Registrar<Erf_Op>::create(name)(*this);
+        mOutputs[0]->setBackend(name);
+
+        // FIXME: temporary workaround
+        getInput(0)->setBackend(name);
+    }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Erf(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Erf_Op>(), name);
+}
+}
+
+#endif /* AIDGE_CORE_OPERATOR_ERF_H_ */
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f8276222811f6cc02c062d85e7ae99d72edead7a
--- /dev/null
+++ b/include/aidge/operator/Gather.hpp
@@ -0,0 +1,100 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_GATHER_H_
+#define AIDGE_CORE_OPERATOR_GATHER_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Data.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class GatherAttr { Axis };
+
+class Gather_Op : public OperatorTensor,
+                public Registrable<Gather_Op,
+                                   std::string,
+                                   std::unique_ptr<OperatorImpl>(const Gather_Op&)>,
+                public StaticAttributes<GatherAttr, int> {
+
+public:
+    static const std::string Type;
+
+    Gather_Op() = delete;
+
+    
+    using Attributes_ = StaticAttributes<GatherAttr, int>;
+    template <GatherAttr e> using attr = typename Attributes_::template attr<e>;
+    Gather_Op(int axis)
+            : OperatorTensor(Type, 2, 0, 1),
+            Attributes_(
+                attr<GatherAttr::Axis>(axis))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Gather_Op(const Gather_Op& op)
+        : OperatorTensor(op),
+          Attributes_(op)
+    {
+        mImpl = op.mImpl ? Registrar<Gather_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Gather_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Gather_Op>(*this);
+    }
+
+    void computeOutputDims() override final;
+
+    void setBackend(const std::string& name) override {
+        mImpl = Registrar<Gather_Op>::create(name)(*this);
+        mOutputs[0]->setBackend(name);
+
+        // FIXME: temporary workaround
+        getInput(0)->setBackend(name);
+        getInput(1)->setBackend(name);
+    }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input", "indexes"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Gather(int axis = 0, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Gather_Op>(axis), name);
+}
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"Axis"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..0acd21b28fac54e7e6d30e8219ead0e04ef777f6
--- /dev/null
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -0,0 +1,146 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
+#define AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
+
+#include <array>
+#include <cmath>
+#include <numeric>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class ReduceMeanAttr { Axes, KeepDims };
+
+template <DimIdx_t DIM>
+class ReduceMean_Op : public OperatorTensor,
+                public Registrable<ReduceMean_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
+                public StaticAttributes<ReduceMeanAttr, std::array<int, DIM>, DimSize_t> {
+
+   public:
+    static const std::string Type;
+
+    ReduceMean_Op() = delete;
+
+    using Attributes_ = StaticAttributes<ReduceMeanAttr, std::array<int, DIM>, DimSize_t>;
+    template <ReduceMeanAttr e>
+    using attr = typename Attributes_::template attr<e>;
+
+    constexpr ReduceMean_Op(const std::array<int, DIM> &axes, DimSize_t keep_dims)
+        : OperatorTensor(Type, 1, 0, 1),
+          Attributes_(attr<ReduceMeanAttr::Axes>(axes),
+                      attr<ReduceMeanAttr::KeepDims>(keep_dims)) {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    ReduceMean_Op(const ReduceMean_Op<DIM>& op)
+        : OperatorTensor(op),
+          Attributes_(op)
+    {
+        mImpl = op.mImpl ? Registrar<ReduceMean_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::ReduceMean_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<ReduceMean_Op<DIM>>(*this);
+    }
+
+    void computeOutputDims() override final {
+        if (!getInput(0)->empty()) {
+            std::vector<DimSize_t> outDims;
+            for(std::size_t d=0; d<getInput(0)->dims().size(); ++d)
+            {
+                bool reducedDim =  false;
+                for(std::size_t i=0; i<DIM; ++i)
+                {
+                    int axis_ = this->template getAttr<ReduceMeanAttr::Axes>()[i];
+                    std::size_t axis= axis_>=0? axis_: axis_ + getInput(0)->nbDims();
+                    if(axis == d)
+                    {
+                        reducedDim = true;
+                        break;
+                    }
+                }
+                if(reducedDim)
+                {
+                    if(this->template getAttr<ReduceMeanAttr::KeepDims>())
+                        outDims.push_back(1);
+                }
+                else
+                    outDims.push_back(getInput(0)->dims()[d]);
+            }        
+            if(outDims.size()>0)
+                mOutputs[0]->resize(outDims);
+            else
+                mOutputs[0]->resize({1});
+        }
+    }
+
+    void setBackend(const std::string &name) override {
+        mImpl = Registrar<ReduceMean_Op<DIM>>::create(name)(*this);
+        mOutputs[0]->setBackend(name);
+
+        // FIXME: temporary workaround
+        getInput(0)->setBackend(name);
+    }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> ReduceMean(const std::array<int, DIM> &axes,
+                                        DimSize_t keep_dims=1,
+                                        const std::string& name = "") {
+    // FIXME: properly handle default w&b initialization in every cases
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
+    return std::make_shared<Node>(std::make_shared<ReduceMean_Op<static_cast<DimIdx_t>(DIM)>>(axes, keep_dims), name);
+
+}
+
+// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> ReduceMean(
+    int const (&axes)[DIM],
+    DimSize_t keep_dims = 1,
+    const std::string& name = "") {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
+    return ReduceMean(to_array(axes), keep_dims, name);
+}
+
+template <DimIdx_t DIM>
+const std::string ReduceMean_Op<DIM>::Type = "ReduceMean";
+
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"Axes", "KeepDims"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..1ffa045960037f35167ae2d6e8904c49e2c55560
--- /dev/null
+++ b/include/aidge/operator/Reshape.hpp
@@ -0,0 +1,97 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_RESHAPE_H_
+#define AIDGE_CORE_OPERATOR_RESHAPE_H_
+
+#include <cassert>
+#include <memory>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+enum class ReshapeAttr { Shape };
+
+class Reshape_Op : public OperatorTensor,
+                   public Registrable<Reshape_Op, std::string, std::unique_ptr<OperatorImpl>(const Reshape_Op&)>,
+                   public StaticAttributes<ReshapeAttr, std::vector<std::int64_t>> {
+
+public:
+    static const std::string Type;
+
+    Reshape_Op() = delete;
+
+    using Attributes_ = StaticAttributes<ReshapeAttr, std::vector<std::int64_t>>;
+    template <ReshapeAttr e>
+    using attr = typename Attributes_::template attr<e>;
+
+    Reshape_Op(const std::vector<std::int64_t>& shape)
+        : OperatorTensor(Type, 1, 0, 1),
+          Attributes_(attr<ReshapeAttr::Shape>(shape))
+    {}
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Reshape_Op(const Reshape_Op& op)
+        : OperatorTensor(op),
+          Attributes_(op)
+    {
+        mImpl = op.mImpl ? Registrar<Reshape_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Reshape_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Reshape_Op>(*this);
+    }
+
+    void computeOutputDims() override final;
+
+    void setBackend(const std::string& name) override {
+        mImpl = Registrar<Reshape_Op>::create(name)(*this);
+        mOutputs[0]->setBackend(name);
+
+        // FIXME: temporary workaround
+        getInput(0)->setBackend(name);
+    }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+inline std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape,
+                                   		const std::string &name = "") {
+    // FIXME: properly handle default w&b initialization in every cases
+    return std::make_shared<Node>(std::make_shared<Reshape_Op>(shape), name);
+}
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = { "Shape" };
+}
+
+#endif /* AIDGE_CORE_OPERATOR_RESHAPE_H_ */
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index bcf9a5a66147b821a062cd6b93087cb1c45bca00..972edc12059b2e0a2b2343e74892237b0fc338d8 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -16,29 +16,44 @@
 #include <memory>
 #include <vector>
 
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/operator/OperatorTensor.hpp"
+
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+enum class SoftmaxAttr { AxisIdx };
 
 class Softmax_Op : public OperatorTensor,
-    public Registrable<Softmax_Op, std::string, std::unique_ptr<OperatorImpl>(const Softmax_Op&)> {
+                public Registrable<Softmax_Op,
+                                   std::string,
+                                   std::unique_ptr<OperatorImpl>(const Softmax_Op&)>,
+                public StaticAttributes<SoftmaxAttr, int> {
+
 public:
     static const std::string Type;
 
-    Softmax_Op() : OperatorTensor(Type, 1, 0, 1) {}
+    Softmax_Op() = delete;
+
+    using Attributes_ = StaticAttributes<SoftmaxAttr, int>;
+    template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
+    Softmax_Op(int axis)
+            :  OperatorTensor(Type, 1, 0, 1),
+            Attributes_(attr<SoftmaxAttr::AxisIdx>(axis)) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
     Softmax_Op(const Softmax_Op& op)
-        : OperatorTensor(op)
+        : OperatorTensor(op),
+          Attributes_(op)
     {
         mImpl = op.mImpl ? Registrar<Softmax_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
@@ -67,9 +82,14 @@ public:
     }
 };
 
-inline std::shared_ptr<Node> Softmax(const std::string& name = "") {
-    return std::make_shared<Node>(std::make_shared<Softmax_Op>(), name);
+inline std::shared_ptr<Node> Softmax(int axis, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<Softmax_Op>(axis), name);
 }
+} // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"Axis"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SOFTMAX_H_ */
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f111be76cd712265e92e2e4c3e0220f79e13b1f7
--- /dev/null
+++ b/include/aidge/operator/Transpose.hpp
@@ -0,0 +1,125 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_OPERATOR_TRANSPOSE_H_
+#define AIDGE_CORE_OPERATOR_TRANSPOSE_H_
+
+#include <array>
+#include <cmath>
+#include <numeric>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+enum class TransposeAttr { OutputDimsOrder };
+
+template <DimIdx_t DIM>
+class Transpose_Op : public OperatorTensor,
+                public Registrable<Transpose_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
+                public StaticAttributes<TransposeAttr,
+                                       std::array<DimSize_t, DIM>> {
+
+   public:
+    static const std::string Type;
+
+    Transpose_Op() = delete;
+
+    using Attributes_ = StaticAttributes<TransposeAttr,
+                                             std::array<DimSize_t, DIM>>;
+    template <TransposeAttr e>
+    using attr = typename Attributes_::template attr<e>;
+
+    constexpr Transpose_Op(const std::array<DimSize_t, DIM> &output_dims_order)
+        : OperatorTensor(Type, 1, 0, 1),
+          Attributes_(attr<TransposeAttr::OutputDimsOrder>(output_dims_order)) { }
+
+    /**
+     * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
+     * @param op Operator to copy.
+     */
+    Transpose_Op(const Transpose_Op<DIM>& op)
+        : OperatorTensor(op),
+          Attributes_(op)
+    {
+        mImpl = op.mImpl ? Registrar<Transpose_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+    }
+
+    /**
+     * @brief Clone the operator using its copy-constructor.
+     * @see Operator::Transpose_Op
+     */
+    std::shared_ptr<Operator> clone() const override {
+        return std::make_shared<Transpose_Op<DIM>>(*this);
+    }
+
+    void computeOutputDims() override final {
+        if (!getInput(0)->empty()) {
+            auto attr = (this)->getStaticAttributes();
+            const std::array<DimSize_t, DIM>& outDimsOrder = static_cast<const std::array<DimSize_t, DIM>&>(std::get<0>(attr));
+            std::vector<DimSize_t> outputDims;
+            for (std::size_t i = 0; i < DIM; ++i) {
+                outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);
+            }
+            mOutputs[0]->resize(outputDims);
+        }
+    }
+
+    void setBackend(const std::string &name) override {
+        mImpl = Registrar<Transpose_Op<DIM>>::create(name)(*this);
+        mOutputs[0]->setBackend(name);
+
+        // FIXME: temporary workaround
+        getInput(0)->setBackend(name);
+    }
+
+    static const std::vector<std::string> getInputsName(){
+        return {"data_input"};
+    }
+    static const std::vector<std::string> getOutputsName(){
+        return {"data_output"};
+    }
+};
+
+template <std::array<DimSize_t, 1>::size_type DIM>
+inline std::shared_ptr<Node> Transpose(const std::array<DimSize_t, DIM> &output_dims_order,
+                                           const std::string& name = "") {
+    // FIXME: properly handle default w&b initialization in every cases
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Transpose, not supported");
+    return std::make_shared<Node>(std::make_shared<Transpose_Op<static_cast<DimIdx_t>(DIM)>>(output_dims_order), name);
+}
+
+// helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
+template <DimSize_t DIM>
+inline std::shared_ptr<Node> Transpose(
+    DimSize_t const (&output_dims_order)[DIM],
+    const std::string& name = "") {
+    static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Transpose, not supported");
+    return Transpose(to_array(output_dims_order), name);
+}
+
+template <DimIdx_t DIM>
+const std::string Transpose_Op<DIM>::Type = "Transpose";
+
+}  // namespace Aidge
+
+namespace {
+template <>
+const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"OutputDimsOrder"};
+}
+
+#endif /* AIDGE_CORE_OPERATOR_TRANSPOSE_H_ */
diff --git a/include/aidge/recipies/Recipies.hpp b/include/aidge/recipies/Recipies.hpp
index 5ad08a6582aa886604d0068f75cab9fe1631b05e..0c4cb6566338bd22ace6c28cd67aab90f4cf93c2 100644
--- a/include/aidge/recipies/Recipies.hpp
+++ b/include/aidge/recipies/Recipies.hpp
@@ -42,6 +42,24 @@ void fuseMulAdd(std::shared_ptr<Node> matmul,std::shared_ptr<Node> add);
  */
 void fuseMulAdd(std::shared_ptr<GraphView> graphView);
 
+// REMOVE Dropout
+
+/**
+ * @brief Remove ``Dropout`` Node.
+ *
+ * @param nodes Node to remove.
+ */
+void removeDropout(std::shared_ptr<Node> dropout);
+
+
+void removeDropout(std::shared_ptr<MatchSolution> solution);
+
+/**
+ * @brief Remove ``Dropout`` Node.
+ *
+ * @param graphView Graph view to use graph matching on, in order to apply transformations.
+ */
+void removeDropout(std::shared_ptr<GraphView> graphView);
 
 // REMOVE FLATTEN + FC -> FC
 
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2b7e5d6b99194e914e48dc6263d0bdcd6a4a8a2f
--- /dev/null
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -0,0 +1,28 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <string>
+
+#include "aidge/operator/Concat.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Concat(py::module& m) {
+    py::class_<Concat_Op, std::shared_ptr<Concat_Op>, OperatorTensor, Attributes>(m, "ConcatOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Concat_Op::getInputsName)
+    .def("get_outputs_name", &Concat_Op::getOutputsName);
+
+    m.def("Concat", &Concat, py::arg("nbIn"), py::arg("axis"), py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..806867f61c3580543c184d529edc2856ee8d7a6c
--- /dev/null
+++ b/python_binding/operator/pybind_Erf.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Erf.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Erf(py::module& m) {
+    py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(m, "ErfOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Erf_Op::getInputsName)
+    .def("get_outputs_name", &Erf_Op::getOutputsName);
+
+    m.def("Erf", &Erf, py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f9768e38fbdceef4a15cc74430bc2205bb32cb6a
--- /dev/null
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -0,0 +1,28 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <string>
+
+#include "aidge/operator/Gather.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Gather(py::module& m) {
+    py::class_<Gather_Op, std::shared_ptr<Gather_Op>, OperatorTensor, Attributes>(m, "GatherOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Gather_Op::getInputsName)
+    .def("get_outputs_name", &Gather_Op::getOutputsName);
+
+    m.def("Gather", &Gather, py::arg("axis"), py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e5de98b69adde5133dde302f7306bc8a5c471eef
--- /dev/null
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -0,0 +1,54 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <string>
+#include <vector>
+#include <array>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/ReduceMean.hpp"
+#include "aidge/utils/Types.h"
+
+namespace py = pybind11;
+namespace Aidge {
+
+template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) {
+  py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, OperatorTensor, Attributes>(
+    m, ("ReduceMeanOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
+    .def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName)
+    .def("get_outputs_name", &ReduceMean_Op<DIM>::getOutputsName)
+    ;
+
+  m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), [](const std::vector<int>& axes,
+                                                                DimSize_t keepDims,
+                                                                const std::string& name) {
+        AIDGE_ASSERT(axes.size() == DIM, "axes size [%ld] does not match DIM [%d]", axes.size(), DIM);
+
+        return ReduceMean<DIM>(to_array<DIM>(axes.begin()), keepDims, name);
+    }, py::arg("axes"),
+       py::arg("keep_dims") = 1,
+       py::arg("name") = "");
+}
+
+
+void init_ReduceMean(py::module &m) {
+  declare_ReduceMeanOp<1>(m);
+  declare_ReduceMeanOp<2>(m);
+  declare_ReduceMeanOp<3>(m);
+
+  // FIXME:
+  // m.def("ReduceMean1D", static_cast<NodeAPI(*)(const char*, int, int, int const
+  // (&)[1])>(&ReduceMean));
+}
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d34a411c719bdbb1144edaa65b50050d705e0d90
--- /dev/null
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Reshape.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Reshape(py::module& m) {
+    py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Reshape_Op::getInputsName)
+    .def("get_outputs_name", &Reshape_Op::getOutputsName);
+
+    m.def("Reshape", &Reshape, py::arg("shape"), py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7bfd1b4f00579ed29658db73b71f2c596048fe75
--- /dev/null
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -0,0 +1,27 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+
+#include "aidge/operator/Slice.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Slice(py::module& m) {
+    py::class_<Slice_Op, std::shared_ptr<Slice_Op>, OperatorTensor>(m, "SliceOp", py::multiple_inheritance())
+    .def("get_inputs_name", &Slice_Op::getInputsName)
+    .def("get_outputs_name", &Slice_Op::getOutputsName);
+
+    m.def("Slice", &Slice, py::arg("starts"), py::arg("ends"), py::arg("axes"), py::arg("name") = "");
+}
+}  // namespace Aidge
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index dc29e2171ff6f0fbbb5c80183778d8f20cbe085b..04e92d39971a731931397e943aba6e296a81a14d 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -19,10 +19,10 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Softmax(py::module& m) {
-    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor>(m, "SoftmaxOp", py::multiple_inheritance())
+    py::class_<Softmax_Op, std::shared_ptr<Softmax_Op>, OperatorTensor, Attributes>(m, "SoftmaxOp", py::multiple_inheritance())
     .def("get_inputs_name", &Softmax_Op::getInputsName)
     .def("get_outputs_name", &Softmax_Op::getOutputsName);
 
-    m.def("Softmax", &Softmax, py::arg("name") = "");
+    m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e92e9c2aaafe2d20220da053a2b9d799fbe8466d
--- /dev/null
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -0,0 +1,52 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+
+#include <string>
+#include <vector>
+#include <array>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Transpose.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/data/Tensor.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+template <DimIdx_t DIM> 
+void declare_Transpose(py::module &m) {
+  py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, OperatorTensor, Attributes>(
+    m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
+  .def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
+  .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName);
+
+  m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order,
+                                                                  const std::string& name) {
+        AIDGE_ASSERT(output_dims_order.size() == DIM, "output_dims_order size [%ld] does not match DIM [%d]", output_dims_order.size(), DIM);
+        return Transpose<DIM>(to_array<DIM>(output_dims_order.begin()), name);
+    }, py::arg("output_dims_order"),
+       py::arg("name") = "");
+
+}
+
+void init_Transpose(py::module &m) {
+  declare_Transpose<2>(m);
+  declare_Transpose<3>(m);
+  declare_Transpose<4>(m);
+  declare_Transpose<5>(m);
+  declare_Transpose<6>(m);
+
+}
+} // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index b1e0e0d11fbbae61a6b853e866adc02e77f315dd..0353953fda39cd6dc283d20a0a3e36659dd891a4 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -24,10 +24,13 @@ void init_OperatorTensor(py::module&);
 void init_Add(py::module&);
 void init_AvgPooling(py::module&);
 void init_BatchNorm(py::module&);
+void init_Concat(py::module&);
 void init_Conv(py::module&);
 void init_ConvDepthWise(py::module&);
 void init_Div(py::module&);
+void init_Erf(py::module&);
 void init_FC(py::module&);
+void init_Gather(py::module&);
 void init_GenericOperator(py::module&);
 void init_LeakyReLU(py::module&);
 void init_MatMul(py::module&);
@@ -37,10 +40,14 @@ void init_Mul(py::module&);
 void init_Producer(py::module&);
 void init_Pad(py::module&);
 void init_Pow(py::module&);
+void init_ReduceMean(py::module&);
 void init_ReLU(py::module&);
+void init_Reshape(py::module&);
+void init_Slice(py::module&);
 void init_Softmax(py::module&);
 void init_Sqrt(py::module&);
 void init_Sub(py::module&);
+void init_Transpose(py::module&);
 void init_Identity(py::module&);
 
 void init_Node(py::module&);
@@ -72,10 +79,13 @@ void init_Aidge(py::module& m){
     init_Add(m);
     init_AvgPooling(m);
     init_BatchNorm(m);
+    init_Concat(m);
     init_Conv(m);
     init_ConvDepthWise(m);
     init_Div(m);
+    init_Erf(m);
     init_FC(m);
+    init_Gather(m);
     init_GenericOperator(m);
     init_LeakyReLU(m);
     init_MatMul(m);
@@ -85,10 +95,14 @@ void init_Aidge(py::module& m){
     init_Pad(m);
 
     init_Pow(m);
+    init_ReduceMean(m);
     init_ReLU(m);
+    init_Reshape(m);
+    init_Slice(m);
     init_Softmax(m);
     init_Sqrt(m);
     init_Sub(m);
+    init_Transpose(m);
     init_Identity(m);
 
     init_Producer(m);
diff --git a/python_binding/recipies/pybind_Recipies.cpp b/python_binding/recipies/pybind_Recipies.cpp
index 8c0c66ec14121ea01655a49addc567b1ad9398c8..bd058defb21c13cea1323e4748129c92519de039 100644
--- a/python_binding/recipies/pybind_Recipies.cpp
+++ b/python_binding/recipies/pybind_Recipies.cpp
@@ -38,6 +38,13 @@ void init_Recipies(py::module &m) {
   //   :type nodes: list of :py:class:`aidge_core.Node`
   //   )mydelimiter");
 
+  m.def("remove_dropout",static_cast<void(*)(std::shared_ptr<GraphView>)>(removeDropout), py::arg("graph_view"), R"mydelimiter(
+    Recipe to remove a dropout operator.
+
+    :param graph_view: Graph view on which we want to apply the recipe
+    :type graph_view: :py:class:`aidge_core.GraphView`
+    )mydelimiter");
+
   m.def("remove_flatten", static_cast<void(*)(std::shared_ptr<GraphView>)>(removeFlatten), py::arg("graph_view"), R"mydelimiter(
     Recipie to remove a flatten operator.
 
diff --git a/src/operator/Erf.cpp b/src/operator/Erf.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..387af4edf417f8c7ac6ee9b8b2b7069179ad59cb
--- /dev/null
+++ b/src/operator/Erf.cpp
@@ -0,0 +1,16 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <string>
+
+#include "aidge/operator/Erf.hpp"
+
+const std::string Aidge::Erf_Op::Type = "Erf";
\ No newline at end of file
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..30804994b6084a5a5558f106a38a6087e54471bc
--- /dev/null
+++ b/src/operator/Gather.cpp
@@ -0,0 +1,39 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstddef>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/Gather.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+const std::string Aidge::Gather_Op::Type = "Gather";
+
+void Aidge::Gather_Op::computeOutputDims() {
+    // check inputs have been associated
+    if (!getInput(0) || !getInput(1)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
+    }
+
+    if (getInput(1)->nbDims()!=2){
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Indices input must be a 2D Tensor");
+    }
+
+    std::vector<DimSize_t> outDims = getInput(0)->dims();
+    std::vector<DimSize_t> indexesDims = getInput(1)->dims();
+    int axisIdx = this->template getAttr<GatherAttr::Axis>()>=0?this->template getAttr<GatherAttr::Axis>():this->template getAttr<GatherAttr::Axis>()+outDims.size();
+    outDims.erase(outDims.begin() + static_cast<std::size_t>(axisIdx));
+    outDims.insert(outDims.begin() + static_cast<std::size_t>(axisIdx), indexesDims.begin(),indexesDims.end());
+    mOutputs[0]->resize(outDims);
+}
\ No newline at end of file
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b0eea3c1f9f7054021b631c85e0f80e7f8845da6
--- /dev/null
+++ b/src/operator/Reshape.cpp
@@ -0,0 +1,47 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cstddef>
+#include <string>
+#include <vector>
+
+#include "aidge/operator/Reshape.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
+
+const std::string Aidge::Reshape_Op::Type = "Reshape";
+
+void Aidge::Reshape_Op::computeOutputDims() {
+    // check inputs have been associated
+    if (!getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
+    }
+
+    DimSize_t nbOutDims = this->template getAttr<ReshapeAttr::Shape>().size();
+    std::vector<DimSize_t> outDims;
+    std::size_t outSize = 1;
+    for(std::size_t i=0; i<nbOutDims; ++i)
+    {
+        int dimSize = this->template getAttr<ReshapeAttr::Shape>()[i];
+        if (dimSize < 1)
+        {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "bad dimension value");
+        }
+        outDims.push_back(dimSize);
+        outSize *= dimSize;
+    }
+
+    if (getInput(0)->size() != outSize){
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Output shape must give the same size as input");
+    }
+
+    mOutputs[0]->resize(outDims);
+}
\ No newline at end of file
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index bccb0b8b48cf9d7f44f685c058ef7b50e7ca1df0..139e84b561a48c2f6a5ecd14ed9d6905d66dec20 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -9,6 +9,8 @@
  *
  ********************************************************************************/
 #include "aidge/operator/Slice.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
 #include <cassert>
 #include <cstddef>
diff --git a/src/recipies/RemoveDropout.cpp b/src/recipies/RemoveDropout.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1dedac8f19e6ec6b4b1f6dabb6bd3e9b8c759def
--- /dev/null
+++ b/src/recipies/RemoveDropout.cpp
@@ -0,0 +1,57 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <memory>
+#include <iostream>
+
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/recipies/Recipies.hpp"
+
+//Graph Regex
+#include "aidge/graphRegex/GraphRegex.hpp"
+
+
+namespace Aidge {
+    void removeDropout(std::shared_ptr<Node> dropout) {
+
+        std::set<NodePtr> nodesToRemove;
+        for (auto nodePtr: dropout->getParents())
+        {
+            if(nodePtr->type() == "Producer")
+            {
+                nodesToRemove.insert(nodePtr);
+            }
+        }
+        nodesToRemove.insert(dropout);
+        GraphView::replace(nodesToRemove, {});
+    }
+
+    void removeDropout(std::shared_ptr<MatchSolution> solution){
+
+        assert(solution->at("Dropout").size() == 1 && "Wrong number of nodes Dropout to replace\n");
+
+        for (const auto& dropout : solution->at("Dropout")) {
+
+            removeDropout(dropout);
+        }
+    }
+
+    void removeDropout(std::shared_ptr<GraphView> graphView){
+        std::shared_ptr<GraphRegex> regex = std::make_shared<GraphRegex>();
+        regex->setNodeKey("Dropout","getType($) =='Dropout'");
+        regex->addQuery("Dropout#");
+
+        for (const auto& solution : regex->match(graphView)) {
+            removeDropout(solution);
+        }
+    }
+}