diff --git a/include/aidge/data/Data.hpp b/include/aidge/data/Data.hpp
index 219a37da30bf9cf830e071633c1deb517d14ab48..8fe2263c0aa2a2a3e70dc458ababc406b6823e0d 100644
--- a/include/aidge/data/Data.hpp
+++ b/include/aidge/data/Data.hpp
@@ -61,7 +61,9 @@ enum class DataFormat {
     CDHWN
 };
 
-constexpr std::array<std::array<size_t, 5>, 7> DataFormatTranspose = {{
+using DataFormatTranspose = std::array<size_t, 5>;
+// Dictionary of permutation arrays to obtain each DataFormat from the default format (same order as the DataFormat enum)
+constexpr std::array<DataFormatTranspose, 7> DataFormatTransposeDict = {{
     // Important: in this array only, dimension index must start at 1, not 0!
     // (0 is the default value)
     {},
@@ -73,6 +75,43 @@ constexpr std::array<std::array<size_t, 5>, 7> DataFormatTranspose = {{
     {2, 3, 4, 5, 1}
 }};
 
+/**
+ * Get the DataFormatTranspose array to transpose data from src to dst DataFormat.
+ * @param src Source DataFormat
+ * @param dst Destination DataFormat
+ * @return DataFormatTranspose Permutation array to achieve a transposition
+ *         from src to dst DataFormat.
+ */
+constexpr inline DataFormatTranspose getDataFormatTranspose(const DataFormat& src, const DataFormat& dst) {
+    // Permutation array from default format to src format
+    const auto srcDefToFormat = DataFormatTransposeDict[static_cast<int>(src)];
+    // Permutation array from default format to dst format
+    const auto dstDefToFormat = DataFormatTransposeDict[static_cast<int>(dst)];
+    // Compute permutation array from src format to default format:
+    DataFormatTranspose srcFormatToDef{};
+    for (size_t i = 0; i < srcDefToFormat.size(); ++i) {
+        if (srcDefToFormat[i] > 0) {
+            srcFormatToDef[srcDefToFormat[i] - 1] = i;
+        }
+        else {
+            srcFormatToDef[i] = i;
+        }
+    }
+
+    // Compute permutation array from src format to dst format:
+    DataFormatTranspose srcToDst{};
+    for (size_t i = 0; i < dstDefToFormat.size(); ++i) {
+        if (dstDefToFormat[srcFormatToDef[i]] > 0) {
+            srcToDst[i] = dstDefToFormat[srcFormatToDef[i]] - 1;
+        }
+        else {
+            srcToDst[i] = i;
+        }
+    }
+
+    return srcToDst;
+}
+
 class Data {
 public:
     Data(const std::string& type): mType(type) {};
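
Note: the composition above can be checked directly. A minimal sketch (not part of the patch), assuming the "aidge/data/Data.hpp" header from this change is on the include path; getDataFormatTranspose(DataFormat::NCHW, DataFormat::NHWC) is expected to yield {0, 2, 3, 1, 4}, meaning destination dimension i is taken from source dimension perm[i]:

```cpp
// Usage sketch only (not part of the patch); assumes the Aidge headers from this change are available.
#include <cassert>
#include <iostream>

#include "aidge/data/Data.hpp"

int main() {
    using namespace Aidge;

    // Destination dimension i is taken from source dimension perm[i].
    const auto perm = getDataFormatTranspose(DataFormat::NCHW, DataFormat::NHWC);
    assert(perm[0] == 0 && perm[1] == 2 && perm[2] == 3 && perm[3] == 1);  // N,C,H,W -> N,H,W,C

    // Going the other way gives the inverse permutation.
    const auto inv = getDataFormatTranspose(DataFormat::NHWC, DataFormat::NCHW);
    assert(inv[0] == 0 && inv[1] == 3 && inv[2] == 1 && inv[3] == 2);      // N,H,W,C -> N,C,H,W

    for (auto d : perm) {
        std::cout << d << ' ';  // prints: 0 2 3 1 4
    }
    std::cout << std::endl;
    return 0;
}
```

The trailing entry only matters for 5-D formats; a 4-D tensor uses the first four values of the permutation.
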
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index a442af8e3f1e06bda92ebdb12fbdf78e2fb7b201..5f6be6045167f6ff523876aaa309a536683810de 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -430,10 +430,19 @@ public:
     }
 
     /**
-     * @brief Set the DataFormat of the Tensor.
-     * @param df DataFormat
-     */
-    void setDataFormat(const DataFormat df) {
+     * @brief Set the DataFormat of the Tensor and transpose data only
+     * if the Tensor has already been initialized and copyTrans is true.
+     * In this case, a transposition occurs only if the previous format and the
+     * new format differ from each other and from DataFormat::Default.
+     * @param df New DataFormat
+     * @param copyTrans If true (default), the existing data is copy-transposed
+     *                  whenever the format changes and both previous and new
+     *                  formats differ from DataFormat::Default.
+     */
+    void setDataFormat(const DataFormat df, bool copyTrans = true) {
+        if (mImpl && copyTrans && (dataFormat() != df) && df != DataFormat::Default && dataFormat() != DataFormat::Default) {
+            copyTranspose(*this, getDataFormatTranspose(dataFormat(), df));
+        }
         mDataFormat = df;
     }
 
@@ -716,6 +725,14 @@ public:
     */
     void copyFrom(const Tensor& src);
 
+    /**
+     * Transpose data from another Tensor (which can be itself).
+     * @param src Source tensor to copy from.
+     * @param transpose Permutation array: dimension i of this Tensor is taken from dimension transpose[i] of src.
+     */
+    void copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose);
+    void copyTranspose(const Tensor& src, const DataFormatTranspose& transpose);
+
     /**
      * Copy-cast data from a Tensor.
      * @param src Source tensor to copy-cast from.
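
A short usage sketch of the new behavior (not part of the patch); it assumes a backend named "cpu" (e.g. from aidge_backend_cpu) is registered so the Tensor has an implementation — without one, setDataFormat only changes the format tag:

```cpp
// Usage sketch only (not part of the patch); assumes a "cpu" backend is registered for Tensor.
#include "aidge/data/Tensor.hpp"

void dataFormatExample() {
    using namespace Aidge;

    Tensor t;
    t.resize({16, 3, 224, 224});            // stored as N,C,H,W
    t.setBackend("cpu");
    t.setDataFormat(DataFormat::NCHW);      // previous format was Default: no data movement

    // Previous and new formats are both non-Default and differ, and the Tensor is
    // initialized: data is copy-transposed and dims become {16, 224, 224, 3}.
    t.setDataFormat(DataFormat::NHWC);

    // Explicit copy-transpose into another Tensor: back to N,C,H,W order.
    Tensor u;
    u.setBackend("cpu");
    u.copyTranspose(t, getDataFormatTranspose(DataFormat::NHWC, DataFormat::NCHW));
    u.setDataFormat(DataFormat::NCHW, /*copyTrans=*/false);  // tag only: the data is already in NCHW order
}
```
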
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index c9a4c11d780a41a1620518047d66a7de2d7b55fa..59c00bc804f659096ff3e5b66fef06ca2c625f82 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -214,8 +214,10 @@ public:
 
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const;
-    /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
+    /** @brief Set the same data type for each Operator of the GraphView object's Nodes. */
     void setDataType(const DataType& datatype) const;
+    /** @brief Set the same data format for each Operator of the GraphView object's Nodes. */
+    void setDataFormat(const DataFormat& dataformat) const;
 
 ///////////////////////////////////////////////////////
 //        TOPOLOGY
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index 35ddb81dede6840e237c32b27e658440479fe680..48137610fe74fc8839c2e5dcf6db1df10e29d420 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -117,11 +117,17 @@ std::set<std::shared_ptr<Node>> getConvHorizontalTiling(const std::shared_ptr<No
 
 
 /**
- * Add Convert operators where needed to ensure no conversion needs to be done
+ * Add Cast and Move operators where needed to ensure no conversion needs to be done
  * at the Operator level.
 */
 void explicitCastMove(std::shared_ptr<GraphView> graphView);
 
+/**
+ * Add Transpose operators where needed to ensure no transposition needs to be done
+ * at the Operator level.
+*/
+void explicitTranspose(std::shared_ptr<GraphView> graphView);
+
 /**
  * Flatten the graph by replacing the meta operators by their micro graph.
  * @param recursive If true, recursively replace meta operators until there is
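
Typical call sequence for this recipe, as a hedged sketch mirroring the unit test added at the end of this change: set the desired data formats, propagate dimensions, then make the remaining layout conversions explicit.

```cpp
// Usage sketch only (not part of the patch); mirrors unit_tests/recipes/Test_ExplicitTranspose.cpp below.
#include "aidge/graph/OpArgs.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/Producer.hpp"
#include "aidge/recipes/Recipes.hpp"

void explicitTransposeExample() {
    using namespace Aidge;

    auto conv = Conv(3, 32, {3, 3}, "conv");
    auto graph = Sequential({Producer({16, 3, 224, 224}, "dataProvider"), conv});

    graph->setDataFormat(DataFormat::NCHW);                // whole graph in NCHW...
    conv->getOperator()->setDataFormat(DataFormat::NHWC);  // ...except this operator

    graph->forwardDims();        // propagate dims first, as done in the unit test
    explicitTranspose(graph);    // a Transpose (NCHW -> NHWC) is inserted in front of conv's data input
}
```
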
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 677bd0246e145ebf760f210000728bd2d99a3807..fac7ff0f6cad4c80700bf40266b0457768e8511b 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -319,6 +319,52 @@ void Aidge::Tensor::copyFrom(const Tensor& src) {
                         mImplOffset);
 }
 
+void Aidge::Tensor::copyTranspose(const Tensor& src, const std::vector<DimSize_t>& transpose) {
+    std::vector<DimSize_t> newDims;
+    for (std::size_t i = 0; i < src.dims().size(); ++i) {
+        newDims.push_back(src.dims()[transpose[i]]);
+    }
+
+    std::vector<std::size_t> newStrides(newDims.size(), 1);
+    for (size_t i = 0; i < newDims.size(); ++i) {
+        for (size_t j = i + 1; j < newDims.size(); ++j) {
+            newStrides[i] *= newDims[j];
+        }
+    }
+
+    std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, newDims);
+
+    std::vector<size_t> indices(newDims.size(), 0);
+    for (size_t i = 0; i < src.size(); ++i) {
+        size_t idx = 0;
+        // Compute the destination index from the source indices, permuted by the transpose array
+        for (int j = newDims.size() - 1; j >= 0; --j) {
+            idx += indices[transpose[j]] * newStrides[j];
+        }
+
+        // Copy the value into the new implementation
+        newImpl->copy(src.getImpl()->rawPtr(i), 1, idx);
+
+        // Update indices for the next iteration
+        for (int j = newDims.size() - 1; j >= 0; --j) {
+            if (indices[j] < src.dims()[j] - 1) {
+                indices[j]++;
+                break;
+            }
+            else {
+                indices[j] = 0;
+            }
+        }
+    }
+
+    resize(newDims);
+    setImpl(newImpl);
+}
+
+void Aidge::Tensor::copyTranspose(const Tensor& src, const DataFormatTranspose& transpose) {
+    copyTranspose(src, std::vector<DimSize_t>(transpose.begin(), transpose.end()));
+}
+
 void Aidge::Tensor::copyCastFrom(const Tensor& src,
                                  std::shared_ptr<Tensor>& movedSrcPtr) {
     if (&src == this) {
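
The loop above computes each destination offset from the row-major strides of the permuted dims while walking the source linearly with an odometer-style index update. A self-contained sketch of the same indexing scheme on plain std::vector data (illustration only, independent of Aidge):

```cpp
// Standalone illustration of the stride/odometer indexing used by copyTranspose (not Aidge code).
#include <cassert>
#include <cstddef>
#include <vector>

// Returns dst with dims {srcDims[perm[0]], srcDims[perm[1]], ...}, in row-major order.
std::vector<float> transposeCopy(const std::vector<float>& src,
                                 const std::vector<std::size_t>& srcDims,
                                 const std::vector<std::size_t>& perm) {
    const std::size_t nbDims = srcDims.size();

    // Destination dims and row-major strides.
    std::vector<std::size_t> dstDims(nbDims), dstStrides(nbDims, 1);
    for (std::size_t i = 0; i < nbDims; ++i) dstDims[i] = srcDims[perm[i]];
    for (std::size_t i = 0; i + 1 < nbDims; ++i)
        for (std::size_t j = i + 1; j < nbDims; ++j) dstStrides[i] *= dstDims[j];

    std::vector<float> dst(src.size());
    std::vector<std::size_t> indices(nbDims, 0);  // multi-dimensional index into src, in src order
    for (std::size_t i = 0; i < src.size(); ++i) {
        // Scatter: destination dim j corresponds to source dim perm[j].
        std::size_t idx = 0;
        for (std::size_t j = 0; j < nbDims; ++j) idx += indices[perm[j]] * dstStrides[j];
        dst[idx] = src[i];

        // Odometer-style increment of the source index (last dim is contiguous).
        for (int j = static_cast<int>(nbDims) - 1; j >= 0; --j) {
            if (indices[j] < srcDims[j] - 1) { ++indices[j]; break; }
            indices[j] = 0;
        }
    }
    return dst;
}

int main() {
    // 2x3 matrix transposed to 3x2: {0,1,2; 3,4,5} -> {0,3; 1,4; 2,5}.
    const std::vector<float> m = {0, 1, 2, 3, 4, 5};
    const auto t = transposeCopy(m, {2, 3}, {1, 0});
    assert((t == std::vector<float>{0, 3, 1, 4, 2, 5}));
    return 0;
}
```

Reading the source contiguously and scattering into the destination keeps the inner loop simple, at the cost of non-contiguous writes.
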
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 163ea35c716cd6948c998f1b08f9f07d28fe1940..21dd8170d30111b73d8851895b3ca632e3864c35 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -472,6 +472,12 @@ void Aidge::GraphView::setDataType(const Aidge::DataType &datatype) const {
     }
 }
 
+void Aidge::GraphView::setDataFormat(const Aidge::DataFormat &dataformat) const {
+    for (const auto& node : getNodes()) {
+        node->getOperator()->setDataFormat(dataformat);
+    }
+}
+
 std::vector<
     std::vector<std::pair<std::shared_ptr<Aidge::Node>, Aidge::IOIndex_t>>>
 Aidge::GraphView::outputs() const {
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index 48c139d36c5b5180397dbeb9f7c392b603189463..0e3661fa108fd46d9e2090d1075bfba6a733db50 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -174,6 +174,7 @@ void Aidge::OperatorTensor::setDataType(const DataType& dataType) const {
         getOutput(i)->setDataType(dataType);
     }
 
+    // Set data type for parameter inputs only (weights, bias...), which are usually Producers
     for (IOIndex_t i = nbData(); i < nbInputs(); ++i) {
         AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
         getInput(i)->setDataType(dataType);
@@ -185,6 +186,7 @@ void Aidge::OperatorTensor::setDataFormat(const DataFormat& dataFormat) const {
         getOutput(i)->setDataFormat(dataFormat);
     }
 
+    // Set data format for parameter inputs only (weights, bias...), which are usually Producers
     for (IOIndex_t i = nbData(); i < nbInputs(); ++i) {
         AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
         getInput(i)->setDataFormat(dataFormat);
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 20b2e5a15508368a7a3ca3bbf80bd4174d98ae4e..7b20366576b16868af20947a2248ae3e2df85650 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -25,37 +25,7 @@
 
 void Aidge::TransposeImpl::forward() {
     const Transpose_Op& op = dynamic_cast<const Transpose_Op&>(mOp);
-    const auto inputDims = op.getInput(0)->dims();
-    const auto outputDims = op.getOutput(0)->dims();
-
-    std::vector<std::size_t> outStrides(outputDims.size(), 1);
-    for (size_t i = 0; i < outputDims.size(); ++i) {
-        for (size_t j = i+1; j < outputDims.size(); ++j)
-        {
-            outStrides[i] *= outputDims[j];
-        }
-    }
-
-    std::vector<size_t> indices(outputDims.size(), 0);
-    for (size_t i = 0; i < op.getInput(0)->size(); ++i) {
-        size_t idx = 0;
-        // Permute indices based on OutputDimsOrder attr
-        for (int j = outputDims.size() -1; j >=0; --j) {
-            idx += indices[op.getAttr<std::vector<DimSize_t>>(0)[j]] * outStrides[j];
-        }
-        // Copy the value in output
-        op.getOutput(0)->getImpl()->copy(op.getInput(0)->getImpl()->rawPtr(i), 1, idx);
-
-        // Update indices for the next iteration
-        for (int j = outputDims.size() - 1; j >= 0; --j) {
-            if (indices[j] < inputDims[j] - 1) {
-                indices[j]++;
-                break;
-            } else {
-                indices[j] = 0;
-            }
-        }
-    }
+    op.getOutput(0)->copyTranspose(*(op.getInput(0)), op.getAttr<std::vector<DimSize_t>>(0));
 }
 
 const std::string Aidge::Transpose_Op::Type = "Transpose";
diff --git a/src/recipes/ExplicitTranspose.cpp b/src/recipes/ExplicitTranspose.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..565ab727620f77400369fd679f90d0c0c98becc6
--- /dev/null
+++ b/src/recipes/ExplicitTranspose.cpp
@@ -0,0 +1,91 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/recipes/Recipes.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Transpose.hpp"
+
+void Aidge::explicitTranspose(std::shared_ptr<GraphView> graph) {
+    // First, remove existing Transpose operators, if not needed anymore
+    auto nodes = graph->getNodes();
+    for (auto node : nodes) {
+        AIDGE_ASSERT(node->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
+        const auto& output = std::static_pointer_cast<OperatorTensor>(node->getOperator())->getOutput(0);
+
+        if (node->type() == Transpose_Op::Type) {
+            // Remove existing Transpose operators, if not needed anymore
+            AIDGE_INTERNAL_ASSERT(node->inputs().size() == 1);
+            const auto parent = node->inputs()[0];
+            // Check parent is not nullptr, as this Operator may be an entry point of the graph without parent
+            if (parent.first != nullptr) {
+                AIDGE_ASSERT(parent.first->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
+                const auto& input = std::static_pointer_cast<OperatorTensor>(parent.first->getOperator())->getOutput(parent.second);
+
+                if (input->dataFormat() != DataFormat::Default
+                    && output->dataFormat() != DataFormat::Default
+                    && input->dataFormat() == output->dataFormat())
+                {
+                    // Add direct connection bypassing Transpose node
+                    const auto children = node->outputs()[0];
+                    for (const auto& child : children) {
+                        parent.first->addChild(child.first, parent.second, child.second);
+                    }
+
+                    // Remove all node connections
+                    node->resetConnections();
+                    // Remove node from view
+                    graph->remove(node);
+                }
+            }
+        }
+    }
+
+    // Second, insert Transpose operator between node inputs and parent output, if needed
+    nodes = graph->getNodes();
+    for (auto node : nodes) {
+        // TODO: currently, Operator data format is only reflected in its output tensor data format.
+        // But an Operator might have multiple outputs with different data formats(?)
+        const auto& output = std::static_pointer_cast<OperatorTensor>(node->getOperator())->getOutput(0);
+
+        IOIndex_t inputIdx = 0;
+        for (auto parent : node->inputs()) {
+            // TODO: possible optimization: currently, a Transpose Operator may
+            // be added several times to the same output, if it has multiple children,
+            // even if it is the same conversion each time.
+            if (parent.first != nullptr) {
+                const auto& input = std::static_pointer_cast<OperatorTensor>(parent.first->getOperator())->getOutput(parent.second);
+
+                if ((node->type() != Transpose_Op::Type
+                    && input->dataFormat() != DataFormat::Default
+                    && output->dataFormat() != DataFormat::Default
+                    && input->dataFormat() != output->dataFormat()))
+                {
+                    const auto transpose = getDataFormatTranspose(input->dataFormat(), output->dataFormat());
+                    auto transposeOp = Transpose(std::vector<DimSize_t>(transpose.begin(), transpose.end()));
+                    transposeOp->getOperator()->setDataFormat(output->dataFormat());
+                    transposeOp->getOperator()->setDataType(output->dataType());
+                    if (output->getImpl()) {
+                        const auto& device = output->getImpl()->device();
+                        transposeOp->getOperator()->setBackend(device.first, device.second);
+                    }
+                    transposeOp->addChild(node, 0, inputIdx);
+                    parent.first->addChild(transposeOp, parent.second, 0);
+
+                    graph->add(transposeOp);
+                    graph->add(parent.first);
+                    graph->add(node);
+                }
+            }
+
+            ++inputIdx;
+        }
+    }
+}
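
On the test graph below, the first pass removes nothing (there are no pre-existing Transpose nodes) and the second pass inserts two Transpose nodes: NCHW -> NHWC in front of conv2 and NHWC -> NCHW in front of conv3, which accounts for the node count going from 10 to 12. A small inspection sketch (not part of the patch; listTransposes is a hypothetical helper, assuming the graph was built and processed as in the test):

```cpp
// Inspection sketch only (not part of the patch); listTransposes is a hypothetical helper.
#include <iostream>
#include <memory>

#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Transpose.hpp"

void listTransposes(const std::shared_ptr<Aidge::GraphView>& graph) {
    for (const auto& node : graph->getNodes()) {
        if (node->type() == Aidge::Transpose_Op::Type) {
            // Each inserted Transpose has a single child: the node whose input it converts.
            std::cout << "Transpose inserted in front of: "
                      << node->outputs()[0][0].first->name() << std::endl;
        }
    }
}
```
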
diff --git a/unit_tests/recipes/Test_ExplicitTranspose.cpp b/unit_tests/recipes/Test_ExplicitTranspose.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..abcc25bc9f7d12e420880b7a83a23ab1cf09d6a1
--- /dev/null
+++ b/unit_tests/recipes/Test_ExplicitTranspose.cpp
@@ -0,0 +1,45 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>
+
+#include "aidge/recipes/Recipes.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/graph/OpArgs.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[ExplicitTranspose] conv") {
+    auto conv1 = Conv(3, 32, {3, 3}, "conv1");
+    auto conv2 = Conv(32, 64, {3, 3}, "conv2");
+    auto conv3 = Conv(64, 10, {1, 1}, "conv3", {2, 2});
+
+    auto g1 = Sequential({
+        Producer({16, 3, 224, 224}, "dataProvider"),
+        conv1,
+        conv2,
+        conv3
+    });
+
+    g1->setDataFormat(DataFormat::NCHW);
+    conv2->getOperator()->setDataFormat(DataFormat::NHWC);
+
+    g1->save("explicitTranspose_before");
+    REQUIRE(g1->getNodes().size() == 10);
+
+    g1->forwardDims();
+    explicitTranspose(g1);
+
+    g1->save("explicitTranspose_after");
+    REQUIRE(g1->getNodes().size() == 12);
+}