diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index bf4bbb85be606fc857bf8d771b9ce211ca8e858e..58ee7355e3aec0b86991d8df22753953304dd7c9 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -64,14 +64,8 @@ bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
         return false;
     }
     const std::size_t nbDimsInput0 = getInput(0)->nbDims();
-    if (nbDimsInput0 == 0) {
-        return false;
-    }
-    AIDGE_ASSERT(nbDimsInput0 > 0, "First input in {} Operator is empty", type());
+    AIDGE_ASSERT(nbDimsInput0 > 0, "First input in {} Operator is scalar", type());
     for (IOIndex_t i = 1; i < nbInputs(); ++i) {
-        if (getInput(i)->nbDims() == 0) {
-            return false;
-        }
         AIDGE_ASSERT(nbDimsInput0 == getInput(i)->nbDims(),
             "Input 0 and input {} in {} Operator have different number of dimensions: {} / {}",
             i, type(), nbDimsInput0, getInput(i)->nbDims());
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 5abfff9d8202003cbe5a76a94fab9d9ab5176b6e..207229b93b0ae362f42c1bae6fb1455b5a2b9d3d 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -69,7 +69,9 @@ bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
 
             mOutputs[0]->resize(outDims);
             return true;
+        } else {
+            AIDGE_ASSERT(false, "Incompatible scalar and N-D sizes.");
         }
     }
 
     return false;
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 69820a924105acc8bea817aecb90e0aa278fce06..30372e44f8f9641734fc1109bf03a64794383a3e 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -32,6 +32,7 @@ const std::string Aidge::Transpose_Op::Type = "Transpose";
 
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
+        AIDGE_ASSERT(!getInput(0)->empty(), "Not applicable on scalars.");
         std::vector<DimSize_t> outputDims;
         for (std::size_t i = 0; i < outputDimsOrder().size(); ++i) {
             outputDims.push_back(getInput(0)->dims()[outputDimsOrder()[i]]);
diff --git a/unit_tests/operator/Test_ConcatImpl.cpp b/unit_tests/operator/Test_ConcatImpl.cpp
index 184c02d5208c99b903cf838784bb14fb65799111..fcdf3e8cc1bc07493cfa84608f200f9f334a29cc 100644
--- a/unit_tests/operator/Test_ConcatImpl.cpp
+++ b/unit_tests/operator/Test_ConcatImpl.cpp
@@ -18,6 +18,14 @@
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
+    SECTION("Concat scalar inputs") {
+        std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(2);
+        std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(4);
+        auto myConcat = Concat(2, 0);
+        myConcat->getOperator()->associateInput(0, input1);
+        myConcat->getOperator()->associateInput(1, input2);
+        REQUIRE_THROWS(myConcat->forward());
+    }
     SECTION("Concat 1D inputs") {
         std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array1D<int,2>{{ 2, 3 }});
         std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array1D<int,3>{{ 4, 5, 6 }});
@@ -140,4 +148,4 @@ TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
 
         REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
     }
-}
\ No newline at end of file
+}
diff --git a/unit_tests/operator/Test_Div_Op.cpp b/unit_tests/operator/Test_Div_Op.cpp
index d11f72474b0b70bf335dfee95d13a9b41cfe6efb..cef7bc53ef7e9247e59077028a728e9b1bb2aebe 100644
--- a/unit_tests/operator/Test_Div_Op.cpp
+++ b/unit_tests/operator/Test_Div_Op.cpp
@@ -44,54 +44,54 @@ TEST_CASE("[core/operator] Div_Op(forwardDims)", "[Div][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_MatMul_Op.cpp b/unit_tests/operator/Test_MatMul_Op.cpp
index bdd1de87c27351e943c59fa616c40dc4a0001abc..102a4ab4ec7d6262bdcc05f0c56605dfcb6af89a 100644
--- a/unit_tests/operator/Test_MatMul_Op.cpp
+++ b/unit_tests/operator/Test_MatMul_Op.cpp
@@ -33,24 +33,24 @@ TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") {
     /** @todo Special case of scalar Tensor objects.
      * Not handled yet.
     */
-    // SECTION("0-D / 0-D") {
-    //     std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
-    //     T0->resize({});
-    //     op -> associateInput(0,T0);
+    SECTION("0-D / 0-D") {
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        T0->resize({});
+        op -> associateInput(0,T0);
 
-    //     // input_1 - right
-    //     std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
-    //     T1->resize({});
-    //     op -> associateInput(1,T1);
+        // input_1 - right
+        std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+        T1->resize({});
+        op -> associateInput(1,T1);
 
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims()).empty());
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims()).empty());
 
-    //     // input_1 - wrong
-    //     T1->resize({dist(gen)});
+        // input_1 - wrong
+        T1->resize({dist(gen)});
 
-    //     REQUIRE_THROWS(op->forwardDims());
-    // }
+        REQUIRE_THROWS(op->forwardDims());
+    }
 
     SECTION("1-D / N-D") {
         // input_0
@@ -193,4 +193,4 @@ TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") {
         REQUIRE_THROWS(op -> forwardDims());
     }
 }
-} // namespace Aidge
\ No newline at end of file
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Mul_Op.cpp b/unit_tests/operator/Test_Mul_Op.cpp
index f3f8fb9522943d0a9574cb80cfc228135a973890..8efd1c2dcff0686dd3f1e589ceae6b0655c7937e 100644
--- a/unit_tests/operator/Test_Mul_Op.cpp
+++ b/unit_tests/operator/Test_Mul_Op.cpp
@@ -44,54 +44,54 @@ TEST_CASE("[core/operator] Mul_Op(forwardDims)", "[Mul][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_Pow_Op.cpp b/unit_tests/operator/Test_Pow_Op.cpp
index 4a8d242a355cda58c7b36914efdb1304220f713a..90b865c1d9bd19e3fce51c2af477a9cde16e33bd 100644
--- a/unit_tests/operator/Test_Pow_Op.cpp
+++ b/unit_tests/operator/Test_Pow_Op.cpp
@@ -44,54 +44,54 @@ TEST_CASE("[core/operator] Pow_Op(forwardDims)", "[Pow][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_Sub_Op.cpp b/unit_tests/operator/Test_Sub_Op.cpp
index 329f3da798854ddff3d1c1393d60c57ef180c70a..0797def124a6bbb97c4f15ae98a310a46d313181 100644
--- a/unit_tests/operator/Test_Sub_Op.cpp
+++ b/unit_tests/operator/Test_Sub_Op.cpp
@@ -44,54 +44,54 @@ TEST_CASE("[core/operator] Sub_Op(forwardDims)", "[Sub][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_TransposeImpl.cpp b/unit_tests/operator/Test_TransposeImpl.cpp
index 8b6eafc70b7eefec6e1ccab9d0cfcde1eb4a09d5..18f0d68d87ac1ee66ffb1f24c4c130f9b020d56e 100644
--- a/unit_tests/operator/Test_TransposeImpl.cpp
+++ b/unit_tests/operator/Test_TransposeImpl.cpp
@@ -18,6 +18,15 @@
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Transpose(forward)") {
+    SECTION("Scalar Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(2);
+        std::shared_ptr<Node> myTranspose = Transpose({});
+        auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator());
+        op->associateInput(0,input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        REQUIRE_THROWS(myTranspose->forward());
+    }
     SECTION("3D Tensor") {
         std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array3D<float,2,3,4> {
             {
@@ -120,4 +130,4 @@ TEST_CASE("[cpu/operator] Transpose(forward)") {
 
         REQUIRE(*(op->getOutput(0)) == *output);
     }
-}
\ No newline at end of file
+}