diff --git a/unit_tests/operator/Test_EqualImpl.cpp b/unit_tests/operator/Test_EqualImpl.cpp
index a229b8ce3ebcd7672323f2585e3a48343f544c3d..013e16ebbf467e6111adaba90900b6cacc06ba67 100644
--- a/unit_tests/operator/Test_EqualImpl.cpp
+++ b/unit_tests/operator/Test_EqualImpl.cpp
@@ -19,86 +19,85 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Equal(forward)", "[Equal][CPU]") {
-        SECTION("ForwardDims")
-    {
-        constexpr std::uint16_t NBTRIALS = 10;
-        // Create a random number generator
-        std::random_device rd;
-        std::mt19937 gen(rd());
-        std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0 and 1
-        std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
-        std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5));
-        std::uniform_int_distribution<int> boolDist(0,1);
-
-        SECTION("Same dimensions") {
-            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-                DimSize_t nbDims = nbDimsDist(gen);
-                std::vector<DimSize_t> dims(nbDims);
-                for (std::size_t i = 0; i < nbDims; i++) {
-                    dims[i] = dimSizeDist(gen);
-                }
-
-                std::shared_ptr<Tensor> myInput1 = std::make_shared<Tensor>(dims);
-                myInput1->setBackend("cpu");
-                myInput1->setDataType(DataType::Float32);
-                myInput1->zeros();
-                std::shared_ptr<Tensor> myInput2 = std::make_shared<Tensor>(dims);
-                myInput2->setBackend("cpu");
-                myInput2->setDataType(DataType::Float32);
-                myInput2->zeros();
-                std::shared_ptr<Node> myEqual = Equal();
-                auto op = std::static_pointer_cast<OperatorTensor>(myEqual -> getOperator());
-                op->associateInput(0,myInput1);
-                op->associateInput(1,myInput2);
-                op->setDataType(DataType::Float32);
-                op->setBackend("cpu");
-                op->forwardDims();
-
-                const auto outputDims = op->getOutput(0)->dims();
-                REQUIRE(outputDims == dims);
+TEST_CASE("[cpu/operator] Equal(forwardDims)", "[Equal][CPU]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // Random float distribution between 0.1 and 1.1
+    std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2), std::size_t(10));
+    std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(1), std::size_t(5));
+    std::uniform_int_distribution<int> boolDist(0, 1);
+
+    SECTION("Same dimensions") {
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            DimSize_t nbDims = nbDimsDist(gen);
+            std::vector<DimSize_t> dims(nbDims);
+            for (std::size_t i = 0; i < nbDims; i++) {
+                dims[i] = dimSizeDist(gen);
             }
+
+            std::shared_ptr<Tensor> myInput1 = std::make_shared<Tensor>(dims);
+            myInput1->setBackend("cpu");
+            myInput1->setDataType(DataType::Float32);
+            myInput1->zeros();
+            std::shared_ptr<Tensor> myInput2 = std::make_shared<Tensor>(dims);
+            myInput2->setBackend("cpu");
+            myInput2->setDataType(DataType::Float32);
+            myInput2->zeros();
+            std::shared_ptr<Node> myEqual = Equal();
+            auto op = std::static_pointer_cast<OperatorTensor>(myEqual->getOperator());
+            op->associateInput(0, myInput1);
+            op->associateInput(1, myInput2);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+            op->forwardDims();
+
+            const auto outputDims = op->getOutput(0)->dims();
+            REQUIRE(outputDims == dims);
         }
-        SECTION("Broadcasting") {
-            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-                DimSize_t nbDims = nbDimsDist(gen);
-                std::vector<DimSize_t> dims1(nbDims, 1);
-                std::vector<DimSize_t> dims2(nbDims, 1);
-                std::vector<DimSize_t> expectedOutDims;
-                for (std::size_t i = 0; i < nbDims; i++) {
-                    DimSize_t dim = dimSizeDist(gen);
-                    if (boolDist(gen)) {
-                        dims1[i] = dim;
-                    }
-                    if (boolDist(gen)) {
-                        dims2[i] = dim;
-                    }
-                    expectedOutDims.push_back(std::max(dims1[i],dims2[i]));
+    }
+    SECTION("Broadcasting") {
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            DimSize_t nbDims = nbDimsDist(gen);
+            std::vector<DimSize_t> dims1(nbDims, 1);
+            std::vector<DimSize_t> dims2(nbDims, 1);
+            std::vector<DimSize_t> expectedOutDims;
+            for (std::size_t i = 0; i < nbDims; i++) {
+                DimSize_t dim = dimSizeDist(gen);
+                if (boolDist(gen)) {
+                    dims1[i] = dim;
+                }
+                if (boolDist(gen)) {
+                    dims2[i] = dim;
                 }
+                expectedOutDims.push_back(std::max(dims1[i], dims2[i]));
+            }
 
 
-                std::shared_ptr<Tensor> myInput1 = std::make_shared<Tensor>(dims1);
-                myInput1->setBackend("cpu");
-                myInput1->setDataType(DataType::Float32);
-                myInput1->zeros();
-                std::shared_ptr<Tensor> myInput2 = std::make_shared<Tensor>(dims2);
-                myInput2->setBackend("cpu");
-                myInput2->setDataType(DataType::Float32);
-                myInput2->zeros();
-                std::shared_ptr<Node> myEqual = Equal();
-                auto op = std::static_pointer_cast<OperatorTensor>(myEqual -> getOperator());
-                op->associateInput(0,myInput1);
-                op->associateInput(1,myInput2);
-                op->setDataType(DataType::Float32);
-                op->setBackend("cpu");
-
-                op->forwardDims();
-
-                const auto outputDims = op->getOutput(0)->dims();
-                REQUIRE(outputDims == expectedOutDims);
-            }
+            std::shared_ptr<Tensor> myInput1 = std::make_shared<Tensor>(dims1);
+            myInput1->setBackend("cpu");
+            myInput1->setDataType(DataType::Float32);
+            myInput1->zeros();
+            std::shared_ptr<Tensor> myInput2 = std::make_shared<Tensor>(dims2);
+            myInput2->setBackend("cpu");
+            myInput2->setDataType(DataType::Float32);
+            myInput2->zeros();
+            std::shared_ptr<Node> myEqual = Equal();
+            auto op = std::static_pointer_cast<OperatorTensor>(myEqual->getOperator());
+            op->associateInput(0, myInput1);
+            op->associateInput(1, myInput2);
+            op->setDataType(DataType::Float32);
+            op->setBackend("cpu");
+
+            op->forwardDims();
+
+            const auto outputDims = op->getOutput(0)->dims();
+            REQUIRE(outputDims == expectedOutDims);
         }
     }
+}
+TEST_CASE("[cpu/operator] Equal(forward)", "[Equal][CPU]") {
     SECTION("Same size inputs") {
         std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
         {                                       //