diff --git a/include/aidge/backend/cpu/operator/TransposeImpl.hpp b/include/aidge/backend/cpu/operator/TransposeImpl.hpp
index 3bcbeda6b6263fcbe7e33cd907b8f13bd62b6471..712e672752648f5ff8a3c073f6c81bbe7cc85d9d 100644
--- a/include/aidge/backend/cpu/operator/TransposeImpl.hpp
+++ b/include/aidge/backend/cpu/operator/TransposeImpl.hpp
@@ -24,22 +24,34 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class TransposeImpl2DForward_cpu
-    : public Registrable<TransposeImpl2DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<2>::Attrs& attrs, const std::vector<DimSize_t>, const std::vector<DimSize_t>,const void*, void*)> {
+    : public Registrable<TransposeImpl2DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<2>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
 };
 class TransposeImpl3DForward_cpu
-    : public Registrable<TransposeImpl3DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<3>::Attrs& attrs, const std::vector<DimSize_t>, const std::vector<DimSize_t>,const void*, void*)> {
+    : public Registrable<TransposeImpl3DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<3>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
 };
 class TransposeImpl4DForward_cpu
-    : public Registrable<TransposeImpl4DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<4>::Attrs& attrs, const std::vector<DimSize_t>, const std::vector<DimSize_t>,const void*, void*)> {
+    : public Registrable<TransposeImpl4DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<4>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+class TransposeImpl5DForward_cpu
+    : public Registrable<TransposeImpl5DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<5>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+class TransposeImpl6DForward_cpu
+    : public Registrable<TransposeImpl6DForward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<6>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
 };
 class TransposeImpl2DBackward_cpu
-    : public Registrable<TransposeImpl2DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<2>::Attrs& attrs, const std::vector<DimSize_t>, const std::vector<DimSize_t>,const void*, void*)> {
+    : public Registrable<TransposeImpl2DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<2>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
 };
 class TransposeImpl3DBackward_cpu
-    : public Registrable<TransposeImpl3DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<3>::Attrs& attrs, const std::vector<DimSize_t>, const std::vector<DimSize_t>,const void*, void*)> {
+    : public Registrable<TransposeImpl3DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<3>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
 };
 class TransposeImpl4DBackward_cpu
-    : public Registrable<TransposeImpl4DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<4>::Attrs& attrs, const std::vector<DimSize_t>, const std::vector<DimSize_t>,const void*, void*)> {
+    : public Registrable<TransposeImpl4DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<4>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+class TransposeImpl5DBackward_cpu
+    : public Registrable<TransposeImpl5DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<5>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
+};
+class TransposeImpl6DBackward_cpu
+    : public Registrable<TransposeImpl6DBackward_cpu, std::tuple<DataType, DataType>, void( const typename Transpose_Op<6>::Attrs& attrs, const std::vector<DimSize_t>&, const std::vector<DimSize_t>&, const void*, void*)> {
 };
 
 
@@ -76,11 +88,35 @@ public:
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
     void forward() override;
 };
+class TransposeImpl5D_cpu : public OperatorImpl {
+public:
+    TransposeImpl5D_cpu(const Transpose_Op<5>& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<TransposeImpl5D_cpu> create(const Transpose_Op<5>& op) {
+        return std::make_unique<TransposeImpl5D_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+class TransposeImpl6D_cpu : public OperatorImpl {
+public:
+    TransposeImpl6D_cpu(const Transpose_Op<6>& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<TransposeImpl6D_cpu> create(const Transpose_Op<6>& op) {
+        return std::make_unique<TransposeImpl6D_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
 
 namespace {
 static Registrar<Transpose_Op<2>> registrarTransposeImpl2D_cpu("cpu", Aidge::TransposeImpl2D_cpu::create);
 static Registrar<Transpose_Op<3>> registrarTransposeImpl3D_cpu("cpu", Aidge::TransposeImpl3D_cpu::create);
 static Registrar<Transpose_Op<4>> registrarTransposeImpl4D_cpu("cpu", Aidge::TransposeImpl4D_cpu::create);
+static Registrar<Transpose_Op<5>> registrarTransposeImpl5D_cpu("cpu", Aidge::TransposeImpl5D_cpu::create);
+static Registrar<Transpose_Op<6>> registrarTransposeImpl6D_cpu("cpu", Aidge::TransposeImpl6D_cpu::create);
 }
 }  // namespace Aidge
 
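The header changes above extend the existing type-keyed kernel registry to rank 5 and 6: each `TransposeImplXDForward_cpu` / `...Backward_cpu` class is a `Registrable` keyed on an (input, output) `DataType` pair, and the anonymous-namespace `Registrar` objects bind the new `TransposeImpl5D_cpu::create` and `TransposeImpl6D_cpu::create` factories to the `"cpu"` backend. As a rough, self-contained illustration of that registry pattern (a sketch only; `KernelRegistry`, `KernelFn`, `add` and the simplified kernel signature are hypothetical stand-ins, not Aidge's actual `Registrable` / `Registrar` templates):

```cpp
// Illustration only: a minimal type-keyed kernel registry in the spirit of
// Registrable/Registrar. Names here are hypothetical stand-ins.
#include <cstddef>
#include <functional>
#include <map>
#include <stdexcept>
#include <utility>
#include <vector>

enum class DataType { Float32, Float64, Int32 };
using DimSize_t = std::size_t;

// Simplified kernel signature: (input dims, output dims, raw input, raw output).
using KernelFn = std::function<void(const std::vector<DimSize_t>&,
                                    const std::vector<DimSize_t>&,
                                    const void*, void*)>;

class KernelRegistry {
public:
    // Static registration: each Registrar-like object calls add() at startup.
    static void add(DataType in, DataType out, KernelFn fn) {
        table()[{in, out}] = std::move(fn);
    }
    // Lookup used by forward(): pick the kernel matching the tensors' runtime data types.
    static KernelFn create(DataType in, DataType out) {
        const auto it = table().find({in, out});
        if (it == table().end()) throw std::runtime_error("no kernel registered for this type pair");
        return it->second;
    }
private:
    static std::map<std::pair<DataType, DataType>, KernelFn>& table() {
        static std::map<std::pair<DataType, DataType>, KernelFn> instance;
        return instance;
    }
};
```

Giving each rank its own registrable type keeps the 5D and 6D kernels registered and resolved independently of the existing 2D-4D ones.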
diff --git a/include/aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp
index 79901424189b9327682376909bee087ae9eef47a..307b6d99e97dd3a4017ef4b45c109dec690a5e2a 100644
--- a/include/aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/TransposeImpl_forward_kernels.hpp
@@ -22,7 +22,7 @@
 
 namespace Aidge {
 template <class I, class O, DimSize_t DIM>
-void TransposeImpl_cpu_forward_kernel( const typename Transpose_Op<DIM>::Attrs& attrs, const std::vector<DimSize_t> inputDims, const std::vector<DimSize_t> outputDims, const void* input_, void* output_)
+void TransposeImpl_cpu_forward_kernel( const typename Transpose_Op<DIM>::Attrs& attrs, const std::vector<DimSize_t>& inputDims, const std::vector<DimSize_t>& outputDims, const void* input_, void* output_)
 {
     O* output = static_cast<O*>(output_);
     const I* input = static_cast<const I*>(input_);
@@ -87,6 +87,20 @@ static Registrar<TransposeImpl4DForward_cpu> registrarTransposeImpl4DForward_cpu
         {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 4>);
 static Registrar<TransposeImpl4DForward_cpu> registrarTransposeImpl4DForward_cpu_Float64(
         {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 4>);
+// DIM = 5
+static Registrar<TransposeImpl5DForward_cpu> registrarTransposeImpl5DForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 5>);
+static Registrar<TransposeImpl5DForward_cpu> registrarTransposeImpl5DForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 5>);
+static Registrar<TransposeImpl5DForward_cpu> registrarTransposeImpl5DForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 5>);
+// DIM = 6
+static Registrar<TransposeImpl6DForward_cpu> registrarTransposeImpl6DForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::TransposeImpl_cpu_forward_kernel<float, float, 6>);
+static Registrar<TransposeImpl6DForward_cpu> registrarTransposeImpl6DForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::TransposeImpl_cpu_forward_kernel<int, int, 6>);
+static Registrar<TransposeImpl6DForward_cpu> registrarTransposeImpl6DForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::TransposeImpl_cpu_forward_kernel<double, double, 6>);
 }  // namespace
 }  // namespace Aidge
 
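Two things change in this file: the registered kernel signature now takes the dimension vectors by `const` reference instead of by value (saving a copy per call), and the `DIM = 5` / `DIM = 6` instantiations of the same templated kernel are registered for the three supported type pairs. The kernel body itself is not part of this hunk; the following self-contained sketch (illustrative only, assuming contiguous row-major storage; `transpose_rowmajor` is not Aidge code) shows the index arithmetic such a rank-generic transpose performs, which is why one template covers 2D through 6D:

```cpp
// Illustrative stand-alone N-D transpose over a contiguous, row-major buffer.
// Not the body of TransposeImpl_cpu_forward_kernel; it only demonstrates the
// per-element index mapping that works for any DIM, including 5 and 6.
#include <array>
#include <cstddef>

template <class T, std::size_t DIM>
void transpose_rowmajor(const std::array<std::size_t, DIM>& perm,    // output axis d reads input axis perm[d]
                        const std::array<std::size_t, DIM>& inDims,
                        const T* input, T* output)
{
    // Row-major strides of the input tensor.
    std::array<std::size_t, DIM> inStrides;
    inStrides[DIM - 1] = 1;
    for (std::size_t d = DIM - 1; d > 0; --d)
        inStrides[d - 1] = inStrides[d] * inDims[d];

    // Output dims are the permuted input dims; the element count is unchanged.
    std::array<std::size_t, DIM> outDims;
    std::size_t total = 1;
    for (std::size_t d = 0; d < DIM; ++d) {
        outDims[d] = inDims[perm[d]];
        total *= inDims[d];
    }

    for (std::size_t o = 0; o < total; ++o) {
        // Decompose the flat output index into per-axis coordinates (last axis fastest)
        // and accumulate the matching input offset.
        std::size_t rem = o;
        std::size_t inOffset = 0;
        for (std::size_t d = DIM; d-- > 0;) {
            const std::size_t coord = rem % outDims[d];
            rem /= outDims[d];
            inOffset += coord * inStrides[perm[d]];
        }
        output[o] = input[inOffset];
    }
}
```

For example, with `perm = {0, 4, 3, 2, 1}` an input of dims `{2, 3, 4, 5, 6}` produces an output of dims `{2, 6, 5, 4, 3}`.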
diff --git a/src/operator/TransposeImpl.cpp b/src/operator/TransposeImpl.cpp
index 7485351c67e5e79219b6d4ac4ca2d58606eeebe6..c3e8aaec12eef69675d35925dbca3c49fdca4065 100644
--- a/src/operator/TransposeImpl.cpp
+++ b/src/operator/TransposeImpl.cpp
@@ -25,17 +25,22 @@ Aidge::NbElts_t Aidge::TransposeImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*i
     // this implementation can be in-place
     return 0;
 }
-
 Aidge::NbElts_t Aidge::TransposeImpl3D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
     return 0;
 }
-
-
 Aidge::NbElts_t Aidge::TransposeImpl4D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
     // this implementation can be in-place
     return 0;
 }
+Aidge::NbElts_t Aidge::TransposeImpl5D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+Aidge::NbElts_t Aidge::TransposeImpl6D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
 
 void Aidge::TransposeImpl2D_cpu::forward() {
     assert(mOp.getInput(0) && "missing input #0");
@@ -91,3 +96,33 @@ void Aidge::TransposeImpl4D_cpu::forward() {
                mOp.getInput(0)->getImpl()->rawPtr(),
                mOp.getOutput(0)->getImpl()->rawPtr());
 }
+void Aidge::TransposeImpl5D_cpu::forward() {
+    assert(mOp.getInput(0) && "missing input #0");
+    assert(mOp.getInput(0)->nbDims() == 5 && "input #0 must have the same size as axes attributes (5)");
+
+    // Find the correct kernel type
+    auto kernelFunc =
+            Registrar<TransposeImpl5DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const Transpose_Op<5>&>(mOp).getStaticAttributes(),
+               mOp.getInput(0)->dims(),
+               mOp.getOutput(0)->dims(),
+               mOp.getInput(0)->getImpl()->rawPtr(),
+               mOp.getOutput(0)->getImpl()->rawPtr());
+}
+void Aidge::TransposeImpl6D_cpu::forward() {
+    assert(mOp.getInput(0) && "missing input #0");
+    assert(mOp.getInput(0)->nbDims() == 6 && "input #0 must have the same size as axes attributes (6)");
+
+    // Find the correct kernel type
+    auto kernelFunc =
+            Registrar<TransposeImpl6DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()});
+
+    // Call kernel
+    kernelFunc(dynamic_cast<const Transpose_Op<6>&>(mOp).getStaticAttributes(),
+               mOp.getInput(0)->dims(),
+               mOp.getOutput(0)->dims(),
+               mOp.getInput(0)->getImpl()->rawPtr(),
+               mOp.getOutput(0)->getImpl()->rawPtr());
+}
\ No newline at end of file
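The new `forward()` overloads mirror the existing 2D-4D ones: assert the input rank, resolve the kernel from the registrar by the (input, output) data types, and pass it the static axes attribute together with the input/output dims and raw buffers. The output dims consumed here are produced earlier by `computeOutputDims()` (implemented in aidge_core, not in this patch); for Transpose they are simply the input dims reordered by the axes attribute, as in this illustrative helper (`permutedDims` is a hypothetical name, not an Aidge function):

```cpp
// Sketch of the dims relationship Transpose relies on: output axis i takes the
// size of input axis axes[i]. Illustration only, not Aidge code.
#include <array>
#include <cstddef>

template <std::size_t DIM>
std::array<std::size_t, DIM> permutedDims(const std::array<std::size_t, DIM>& inputDims,
                                          const std::array<std::size_t, DIM>& axes) {
    std::array<std::size_t, DIM> outputDims{};
    for (std::size_t i = 0; i < DIM; ++i)
        outputDims[i] = inputDims[axes[i]];
    return outputDims;
}
// e.g. permutedDims<5>({2, 3, 4, 5, 6}, {0, 4, 3, 2, 1}) == {2, 6, 5, 4, 3}
```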
diff --git a/unit_tests/operator/Test_TransposeImpl.cpp b/unit_tests/operator/Test_TransposeImpl.cpp
index 327d4f8db5570cd9258259eb1844b4b3fc74c607..5ec38e52a84d0c7d2693f52e1cc0d3851cb09946 100644
--- a/unit_tests/operator/Test_TransposeImpl.cpp
+++ b/unit_tests/operator/Test_TransposeImpl.cpp
@@ -17,89 +17,109 @@
 
 #include "aidge/backend/cpu.hpp"
 
-#include <iostream>
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Transpose(forward)") {
-    std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array3D<float,2,3,4> {
-        {
-            {{0.42507452, 0.11244237, 0.43243718, 0.62354952},
-             {0.90250170, 0.48719984, 0.45781207, 0.92536664},
-             {0.06348717, 0.91678733, 0.64452291, 0.00484818}},
+    SECTION("3D Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array3D<float,2,3,4> {
+            {
+                {{0.42507452, 0.11244237, 0.43243718, 0.62354952},
+                 {0.90250170, 0.48719984, 0.45781207, 0.92536664},
+                 {0.06348717, 0.91678733, 0.64452291, 0.00484818}},
 
-            {{0.66873497, 0.99508536, 0.55714869, 0.84887981},
-             {0.41666120, 0.92365038, 0.80034822, 0.38721532},
-             {0.52037925, 0.53937608, 0.66380072, 0.36330253}}
-        }
-    });
-    std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array3D<float,2,4,3> { 
-        {
-            {{0.42507452, 0.90250170, 0.06348717},
-             {0.11244237, 0.48719984, 0.91678733},
-             {0.43243718, 0.45781207, 0.64452291},
-             {0.62354952, 0.92536664, 0.00484818}},
+                {{0.66873497, 0.99508536, 0.55714869, 0.84887981},
+                 {0.41666120, 0.92365038, 0.80034822, 0.38721532},
+                 {0.52037925, 0.53937608, 0.66380072, 0.36330253}}
+            }
+        });
+        std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array3D<float,2,4,3> { 
+            {
+                {{0.42507452, 0.90250170, 0.06348717},
+                 {0.11244237, 0.48719984, 0.91678733},
+                 {0.43243718, 0.45781207, 0.64452291},
+                 {0.62354952, 0.92536664, 0.00484818}},
 
-            {{0.66873497, 0.41666120, 0.52037925},
-             {0.99508536, 0.92365038, 0.53937608},
-             {0.55714869, 0.80034822, 0.66380072},
-             {0.84887981, 0.38721532, 0.36330253}}
-        }
-    });
-    std::shared_ptr<Node> myTranspose = Transpose<3>(std::array<DimSize_t,3>{{0,2,1}});
-    myTranspose->getOperator()->setDatatype(DataType::Float32);
-    myTranspose->getOperator()->setBackend("cpu");
-    myTranspose->getOperator()->associateInput(0,input);
-    myTranspose->getOperator()->computeOutputDims();
-    myTranspose->forward();
+                {{0.66873497, 0.41666120, 0.52037925},
+                 {0.99508536, 0.92365038, 0.53937608},
+                 {0.55714869, 0.80034822, 0.66380072},
+                 {0.84887981, 0.38721532, 0.36330253}}
+            }
+        });
+        std::shared_ptr<Node> myTranspose = Transpose<3>(std::array<DimSize_t,3>{{0,2,1}});
+        myTranspose->getOperator()->setDatatype(DataType::Float32);
+        myTranspose->getOperator()->setBackend("cpu");
+        myTranspose->getOperator()->associateInput(0,input);
+        myTranspose->getOperator()->computeOutputDims();
+        myTranspose->forward();
 
-    // float* resPtr = static_cast<float*>(myTranspose->getOperator()->getOutput(0)->getImpl()->rawPtr());
-    // float* expectedPtr = static_cast<float*>(output->getImpl()->rawPtr());
-    // for (std::size_t i = 0; i< 24; ++i) {
-    //     std::cout << "Res " << resPtr[i] << " , expected : " << expectedPtr[i] << std::endl;
-    //     REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
-    // }
-    REQUIRE(*(myTranspose->getOperator()->getOutput(0)) == *output);
+        REQUIRE(*(myTranspose->getOperator()->getOutput(0)) == *output);
+    }
+    SECTION("4D Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array4D<int,2,3,1,4> {
+            {
+                {
+                    {
+                        {1, 2, 3, 4}
+                    },
+                    {
+                        {5, 6, 7, 8}
+                    },
+                    {
+                        {9, 10, 11, 12}
+                    }
+                },
+                {
+                    {
+                        {13, 14, 15, 16}
+                    },
+                    {
+                        {17, 18, 19, 20}
+                    },
+                    {
+                        {21, 22, 23, 24}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array4D<int,2,4,1,3> { 
+            {
+                {
+                    {
+                        {1, 5, 9}
+                    },
+                    {
+                        {2, 6, 10}
+                    },
+                    {
+                        {3, 7, 11}
+                    },
+                    {
+                        {4, 8, 12}
+                    }
+                },
+                {
+                    {
+                        {13, 17, 21}
+                    },
+                    {
+                        {14, 18, 22}
+                    },
+                    {
+                        {15, 19, 23}
+                    },
+                    {
+                        {16, 20, 24}
+                    }
+                }
+            }
+        });
+        std::shared_ptr<Node> myTranspose = Transpose<4>(std::array<DimSize_t,4>{{0,3,2,1}});
+        myTranspose->getOperator()->setDatatype(DataType::Int32);
+        myTranspose->getOperator()->setBackend("cpu");
+        myTranspose->getOperator()->associateInput(0,input);
+        myTranspose->getOperator()->computeOutputDims();
+        myTranspose->forward();
 
-}
-
-// TEST_CASE("[cpu/operator] Transpose(forward)") {
-//     std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array3D<float,2,3,4> {
-//         {
-//             {{0.0, 0.1, 0.2, 0.3},
-//              {0.4, 0.5, 0.6, 0.7},
-//              {0.8, 0.9, 1.0, 1.1}},
-
-//             {{1.2, 1.3, 1.4, 1.5},
-//              {1.6, 1.7, 1.8, 1.9},
-//              {2.0, 2.1, 2.2, 2.3}}
-//         }
-//     });
-//     std::shared_ptr<Tensor> output = std::make_shared<Tensor>(Array3D<float,2,4,3> { 
-//         {
-//             {{0.0, 0.4, 0.8},
-//              {0.1, 0.5, 0.9},
-//              {0.2, 0.6, 1.0},
-//              {0.3, 0.7, 1.1}},
-
-//             {{1.2, 1.6, 2.0},
-//              {1.3, 1.7, 2.1},
-//              {1.4, 1.8, 2.2},
-//              {1.5, 1.9, 2.3}}
-//         }
-//     });
-//     std::shared_ptr<Node> myTranspose = Transpose<3>(std::array<DimSize_t,3>{{0,2,1}});
-//     myTranspose->getOperator()->setDatatype(DataType::Float32);
-//     myTranspose->getOperator()->setBackend("cpu");
-//     myTranspose->getOperator()->associateInput(0,input);
-//     myTranspose->getOperator()->computeOutputDims();
-//     myTranspose->forward();
-
-//     float* resPtr = static_cast<float*>(myTranspose->getOperator()->getOutput(0)->getImpl()->rawPtr());
-//     float* expectedPtr = static_cast<float*>(output->getImpl()->rawPtr());
-//     for (std::size_t i = 0; i< 24; ++i) {
-//         std::cout << "Res " << resPtr[i] << " , expected : " << expectedPtr[i] << std::endl;
-//         REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
-//     }
-//     // REQUIRE(*(myTranspose->getOperator()->getOutput(0)) == *output);
-
-// }
\ No newline at end of file
+        REQUIRE(*(myTranspose->getOperator()->getOutput(0)) == *output);
+    }
+}
\ No newline at end of file
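The new test sections cover 3D (float, axes {0,2,1}) and 4D (int, axes {0,3,2,1}); the patch adds no 5D or 6D section. As a stand-in, the following self-contained check (plain C++, no Aidge or Catch2 dependency; purely illustrative and not part of the test suite) exercises a rank-5 index mapping by transposing with a self-inverse permutation and verifying that applying it twice restores the input:

```cpp
// Stand-alone sanity check of a rank-5 transpose mapping, in the same spirit as
// the 3D/4D sections above. Illustration only.
#include <array>
#include <cassert>
#include <cstddef>
#include <numeric>
#include <vector>

int main() {
    const std::array<std::size_t, 5> dims = {2, 3, 1, 4, 2};
    const std::array<std::size_t, 5> axes = {0, 3, 2, 1, 4};   // swap axes 1 and 3 (self-inverse)

    // Fill a 2x3x1x4x2 row-major buffer with 0..47.
    std::vector<int> in(2 * 3 * 1 * 4 * 2);
    std::iota(in.begin(), in.end(), 0);

    // Transpose: out[a][b][c][d][e] = in[a][d][c][b][e].
    std::array<std::size_t, 5> outDims;
    for (std::size_t i = 0; i < 5; ++i) outDims[i] = dims[axes[i]];
    std::vector<int> out(in.size());
    for (std::size_t a = 0; a < outDims[0]; ++a)
    for (std::size_t b = 0; b < outDims[1]; ++b)
    for (std::size_t c = 0; c < outDims[2]; ++c)
    for (std::size_t d = 0; d < outDims[3]; ++d)
    for (std::size_t e = 0; e < outDims[4]; ++e) {
        const std::size_t outIdx = (((a * outDims[1] + b) * outDims[2] + c) * outDims[3] + d) * outDims[4] + e;
        const std::size_t inIdx  = (((a * dims[1]    + d) * dims[2]    + c) * dims[3]    + b) * dims[4]    + e;
        out[outIdx] = in[inIdx];
    }

    // Applying the same (self-inverse) permutation to the output must restore the input.
    std::vector<int> back(in.size());
    for (std::size_t a = 0; a < dims[0]; ++a)
    for (std::size_t b = 0; b < dims[1]; ++b)
    for (std::size_t c = 0; c < dims[2]; ++c)
    for (std::size_t d = 0; d < dims[3]; ++d)
    for (std::size_t e = 0; e < dims[4]; ++e) {
        const std::size_t backIdx = (((a * dims[1]    + b) * dims[2]    + c) * dims[3]    + d) * dims[4]    + e;
        const std::size_t outIdx  = (((a * outDims[1] + d) * outDims[2] + c) * outDims[3] + b) * outDims[4] + e;
        back[backIdx] = out[outIdx];
    }
    assert(back == in);
    return 0;
}
```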