From 2a82fdb5a19a130e24a1f86f214d2075739c2c54 Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Wed, 22 Nov 2023 16:57:35 +0000
Subject: [PATCH] Update tests to follow changes

---
 unit_tests/operator/Test_AddImpl.cpp          | 41 +++++++------
 unit_tests/operator/Test_AvgPoolingImpl.cpp   | 30 +++++-----
 unit_tests/operator/Test_BatchNormImpl.cpp    | 23 ++++----
 unit_tests/operator/Test_ConcatImpl.cpp       | 32 +++++-----
 .../operator/Test_ConvDepthWiseImpl.cpp       | 27 ++++-----
 unit_tests/operator/Test_ConvImpl.cpp         | 39 +++++++------
 unit_tests/operator/Test_DivImpl.cpp          | 58 ++++++++++---------
 unit_tests/operator/Test_FCImpl.cpp           | 27 +++++----
 unit_tests/operator/Test_LeakyReLUImpl.cpp    | 57 +++++++++---------
 unit_tests/operator/Test_MatMulImpl.cpp       | 25 ++++----
 unit_tests/operator/Test_MaxPoolingImpl.cpp   | 13 +++--
 unit_tests/operator/Test_MulImpl.cpp          | 29 +++++-----
 unit_tests/operator/Test_PadImpl.cpp          | 49 +++++++++-------
 unit_tests/operator/Test_PaddedConv.cpp       | 20 ++++---
 unit_tests/operator/Test_PowImpl.cpp          | 54 +++++++++--------
 unit_tests/operator/Test_ReLUImpl.cpp         | 46 ++++++++-------
 unit_tests/operator/Test_SliceImpl.cpp        | 54 +++++++++--------
 unit_tests/operator/Test_SoftmaxImpl.cpp      | 16 ++---
 unit_tests/operator/Test_SqrtImpl.cpp         | 16 ++---
 unit_tests/operator/Test_SubImpl.cpp          | 29 +++++-----
 20 files changed, 369 insertions(+), 316 deletions(-)

diff --git a/unit_tests/operator/Test_AddImpl.cpp b/unit_tests/operator/Test_AddImpl.cpp
index 2dda38c1..740b1a53 100644
--- a/unit_tests/operator/Test_AddImpl.cpp
+++ b/unit_tests/operator/Test_AddImpl.cpp
@@ -18,7 +18,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Add(forward)", "[Add]") {
+TEST_CASE("[cpu/operator] Add(forward)", "[Add][CPU]") {
     std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array4D<int,3,3,3,2> {
         {                                       //
             {                                   //
@@ -41,13 +41,14 @@ TEST_CASE("[cpu/operator] Add(forward)", "[Add]") {
 
     SECTION("One input") {
         std::shared_ptr<Node> myAdd = Add(1);
-        myAdd->getOperator()->setBackend("cpu");
-        myAdd->getOperator()->setDataType(DataType::Int32);
-        myAdd->getOperator()->associateInput(0, input1);
-        myAdd->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myAdd -> getOperator());
+        op->associateInput(0, input1);
+        op->setBackend("cpu");
+        op->setDataType(DataType::Int32);
+        op->computeOutputDims();
         myAdd->forward();
 
-        REQUIRE(myAdd->getOperator()->output(0) == *input1);
+        REQUIRE(*(op->getOutput(0)) == *input1);
     }
 
     SECTION("Two inputs") {
@@ -72,14 +73,15 @@ TEST_CASE("[cpu/operator] Add(forward)", "[Add]") {
         });
 
         std::shared_ptr<Node> myAdd = Add(2);
-        myAdd->getOperator()->setDataType(DataType::Int32);
-        myAdd->getOperator()->setBackend("cpu");
-        myAdd->getOperator()->associateInput(0, input1);
-        myAdd->getOperator()->associateInput(1, input1);
-        myAdd->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myAdd -> getOperator());
+        op->associateInput(0, input1);
+        op->associateInput(1, input1);
+        op->setBackend("cpu");
+        op->setDataType(DataType::Int32);
+        op->computeOutputDims();
         myAdd->forward();
 
-        REQUIRE(*std::static_pointer_cast<Tensor>(myAdd->getOperator()->getOutput(0)) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 
     SECTION("Three inputs") {
@@ -104,14 +106,15 @@ TEST_CASE("[cpu/operator] Add(forward)", "[Add]") {
         });
 
         std::shared_ptr<Node> myAdd = Add(3);
-        myAdd->getOperator()->setDataType(DataType::Int32);
-        myAdd->getOperator()->setBackend("cpu");
-        myAdd->getOperator()->associateInput(0, input1);
-        myAdd->getOperator()->associateInput(1, input1);
-        myAdd->getOperator()->associateInput(2, input1);
-        myAdd->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myAdd -> getOperator());
+        op->associateInput(0, input1);
+        op->associateInput(1, input1);
+        op->associateInput(2, input1);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myAdd->forward();
 
-        REQUIRE(*std::static_pointer_cast<Tensor>(myAdd->getOperator()->getOutput(0)) == *expectedOutput);
+        REQUIRE(*op->getOutput(0) == *expectedOutput);
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_AvgPoolingImpl.cpp b/unit_tests/operator/Test_AvgPoolingImpl.cpp
index c1a671c8..c4abf020 100644
--- a/unit_tests/operator/Test_AvgPoolingImpl.cpp
+++ b/unit_tests/operator/Test_AvgPoolingImpl.cpp
@@ -20,7 +20,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] AvgPooling(forward)") {
+TEST_CASE("[cpu/operator] AvgPooling(forward)", "[AvgPooling][CPU]") {
     std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<float,2,2,5,5> { //NCHW
         {
             {
@@ -53,8 +53,7 @@ TEST_CASE("[cpu/operator] AvgPooling(forward)") {
     });
     SECTION("Stride") {
         std::shared_ptr<Node> myAvgPool = AvgPooling({2,2}, "mycdw", {2,2});
-        myAvgPool->getOperator()->setDataType(DataType::Float32);
-        myAvgPool->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myAvgPool -> getOperator());
 
         std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<float,2,2,2,2> {
             {
@@ -72,11 +71,13 @@ TEST_CASE("[cpu/operator] AvgPooling(forward)") {
                 }
             }
         });
-        myAvgPool->getOperator()->associateInput(0,myInput);
-        myAvgPool->getOperator()->computeOutputDims();
+        op->associateInput(0,myInput);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myAvgPool->forward();
-        myAvgPool->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myAvgPool->getOperator()->getOutput(0)) == *myOutput);
+        op->getOutput(0)->print();
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 
     SECTION("Stride >= feature dim") {
@@ -90,21 +91,22 @@ TEST_CASE("[cpu/operator] AvgPooling(forward)") {
         }
         });
         std::shared_ptr<Node> myAvgPool = AvgPooling({3,3}, "mycdw", {3,3});
-        myAvgPool->getOperator()->setDataType(DataType::Float32);
-        myAvgPool->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myAvgPool -> getOperator());
 
         Tensor myOutput = Array4D<float,1,1,1,1> {
             {{{{(0.3745 + 0.9507 + 0.7320 + 0.5987 + 0.1560 + 0.1560 + 0.0581 + 0.8662 + 0.6011)/9.0}}}}
         };
-        myAvgPool->getOperator()->associateInput(0,myInput2);
-        myAvgPool->getOperator()->computeOutputDims();
+        op->associateInput(0,myInput2);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myAvgPool->forward();
-        myAvgPool->getOperator()->getOutput(0)->print();
-        float* outPtr = static_cast<float*>(myAvgPool->getOperator()->output(0).getImpl()->rawPtr());
+        op->getOutput(0)->print();
+        float* outPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedOutPtr = static_cast<float*>(myOutput.getImpl()->rawPtr());
         for (std::size_t i = 0; i < 1; ++i) {
             REQUIRE(std::abs(outPtr[i] - expectedOutPtr[i]) < 0.00001);
         }
     }
-    // std::cout << static_cast<Tensor>((*myAvgPool->getOperator())["weight"])[0][0][0][0] << std::endl;
+    // std::cout << static_cast<Tensor>((*op)["weight"])[0][0][0][0] << std::endl;
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_BatchNormImpl.cpp b/unit_tests/operator/Test_BatchNormImpl.cpp
index dfc31b87..e6b7c3c6 100644
--- a/unit_tests/operator/Test_BatchNormImpl.cpp
+++ b/unit_tests/operator/Test_BatchNormImpl.cpp
@@ -19,10 +19,9 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] BatchNorm(forward)") {
+TEST_CASE("[cpu/operator] BatchNorm(forward)", "[BatchNorm][CPU]") {
     std::shared_ptr<Node> myBatchNorm = BatchNorm<2>(0.00001F, 0.1F, "mybatchnorm");
-    myBatchNorm->getOperator()->setDataType(DataType::Float32);
-    myBatchNorm->getOperator()->setBackend("cpu");
+    auto op = std::static_pointer_cast<OperatorTensor>(myBatchNorm -> getOperator());
     std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array1D<float,3> {{0.9044, 0.3028, 0.0218}});
     std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<float,3> {{0.1332, 0.7503, 0.0878}});
     std::shared_ptr<Tensor> myMean = std::make_shared<Tensor>(Array1D<float,3> {{0.9931, 0.8421, 0.9936}});
@@ -79,19 +78,21 @@ TEST_CASE("[cpu/operator] BatchNorm(forward)") {
             }
         }
     });
-    myBatchNorm->getOperator()->associateInput(0,myInput);
-    myBatchNorm->getOperator()->associateInput(1,myWeights);
-    myBatchNorm->getOperator()->associateInput(2,myBias);
-    myBatchNorm->getOperator()->associateInput(3,myMean);
-    myBatchNorm->getOperator()->associateInput(4,myVar);
-    myBatchNorm->getOperator()->computeOutputDims();
+    op->associateInput(0,myInput);
+    op->associateInput(1,myWeights);
+    op->associateInput(2,myBias);
+    op->associateInput(3,myMean);
+    op->associateInput(4,myVar);
+    op->setDataType(DataType::Float32);
+    op->setBackend("cpu");
+    op->computeOutputDims();
     myBatchNorm->forward();
 
-    float* resPtr = static_cast<float*>(myBatchNorm->getOperator()->getOutput(0)->getImpl()->rawPtr());
+    float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
     float* expectedPtr = static_cast<float*>(myOutput->getImpl()->rawPtr());
     for (std::size_t i = 0; i< 54; ++i) {
         REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
     }
 
-    // std::cout << static_cast<Tensor>((*myBatchNorm->getOperator())["weight"])[0][0][0][0] << std::endl;
+    // std::cout << static_cast<Tensor>((*op)["weight"])[0][0][0][0] << std::endl;
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_ConcatImpl.cpp b/unit_tests/operator/Test_ConcatImpl.cpp
index fe130227..7f616fcb 100644
--- a/unit_tests/operator/Test_ConcatImpl.cpp
+++ b/unit_tests/operator/Test_ConcatImpl.cpp
@@ -18,7 +18,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Concat(forward)", "[Concat]") {
+TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
     SECTION("Concat 1D inputs") {
         std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array1D<int,2>{{ 2, 3 }});
         std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array1D<int,3>{{ 4, 5, 6 }});
@@ -30,19 +30,19 @@ TEST_CASE("[cpu/operator] Concat(forward)", "[Concat]") {
             { 2, 3, 4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16,17,18,19,20,21 }});
 
         auto myConcat = Concat(5, 0);
-        myConcat->getOperator()->setBackend("cpu");
-        myConcat->getOperator()->setDatatype(DataType::Int32);
         myConcat->getOperator()->associateInput(0, input1);
         myConcat->getOperator()->associateInput(1, input2);
         myConcat->getOperator()->associateInput(2, input3);
         myConcat->getOperator()->associateInput(3, input4);
         myConcat->getOperator()->associateInput(4, input5);
-        myConcat->getOperator()->computeOutputDims();
+        myConcat->getOperator()->setBackend("cpu");
+        myConcat->getOperator()->setDataType(DataType::Int32);
+        std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->computeOutputDims();
         myConcat->forward();
 
-        myConcat->getOperator()->getOutput(0)->print();
+        std::static_pointer_cast<Tensor>(myConcat->getOperator()->getRawOutput(0))->print();
 
-        REQUIRE(myConcat->getOperator()->output(0) == *expectedOutput);
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
     }
     SECTION("Concat 4D inputs on 1st axis") {
         std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array4D<int,1,3,3,2> {
@@ -90,16 +90,16 @@ TEST_CASE("[cpu/operator] Concat(forward)", "[Concat]") {
         });                                         //
 
         auto myConcat = Concat(2, 0);
-        myConcat->getOperator()->setBackend("cpu");
-        myConcat->getOperator()->setDatatype(DataType::Int32);
         myConcat->getOperator()->associateInput(0, input1);
         myConcat->getOperator()->associateInput(1, input2);
-        myConcat->getOperator()->computeOutputDims();
+        myConcat->getOperator()->setBackend("cpu");
+        myConcat->getOperator()->setDataType(DataType::Int32);
+        std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->computeOutputDims();
         myConcat->forward();
 
-        myConcat->getOperator()->getOutput(0)->print();
+        std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0)->print();
 
-        REQUIRE(myConcat->getOperator()->output(0) == *expectedOutput);
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
     }
 
     SECTION("Concat 4D inputs on 3rd axis") {
@@ -133,15 +133,15 @@ TEST_CASE("[cpu/operator] Concat(forward)", "[Concat]") {
         });                                                                                               //
 
         auto myConcat = Concat(2, 2);
-        myConcat->getOperator()->setBackend("cpu");
-        myConcat->getOperator()->setDatatype(DataType::Int32);
         myConcat->getOperator()->associateInput(0, input1);
         myConcat->getOperator()->associateInput(1, input2);
-        myConcat->getOperator()->computeOutputDims();
+        myConcat->getOperator()->setBackend("cpu");
+        myConcat->getOperator()->setDataType(DataType::Int32);
+        std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->computeOutputDims();
         myConcat->forward();
 
-        myConcat->getOperator()->getOutput(0)->print();
+        std::static_pointer_cast<Tensor>(myConcat->getOperator()->getRawOutput(0))->print();
 
-        REQUIRE(myConcat->getOperator()->output(0) == *expectedOutput);
+        REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_ConvDepthWiseImpl.cpp b/unit_tests/operator/Test_ConvDepthWiseImpl.cpp
index 9ff62b28..112703b6 100644
--- a/unit_tests/operator/Test_ConvDepthWiseImpl.cpp
+++ b/unit_tests/operator/Test_ConvDepthWiseImpl.cpp
@@ -19,11 +19,10 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] ConvDepthWise(forward)") {
+TEST_CASE("[cpu/operator] ConvDepthWise(forward)", "[ConvDepthWise][CPU]") {
     std::shared_ptr<Node> myCDW = ConvDepthWise(4, {3,3}, "mycdw");
-    myCDW->getOperator()->setDataType(DataType::Int32);
-    myCDW->getOperator()->setBackend("cpu");
-    std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array3D<int,4,1,3,3> {
+    auto op = std::static_pointer_cast<OperatorTensor>(myCDW -> getOperator());
+    std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,1,3,3> {
         {
             {{
                 {  0,  1,  2},
@@ -142,13 +141,15 @@ TEST_CASE("[cpu/operator] ConvDepthWise(forward)") {
             }
         }
     });
-    myCDW->getOperator()->associateInput(0,myInput);
-    myCDW->getOperator()->associateInput(1,myWeights);
-    myCDW->getOperator()->associateInput(2,myBias);
-    myCDW->getOperator()->computeOutputDims();
-    myCDW->forward();
-    myCDW->getOperator()->getOutput(0)->print();
-    REQUIRE(*(myCDW->getOperator()->getOutput(0)) == *myOutput);
-
-    // std::cout << static_cast<Tensor>((*myCDW->getOperator())["weight"])[0][0][0][0] << std::endl;
+    op -> associateInput(0, myInput);
+    op -> associateInput(1, myWeights);
+    op -> associateInput(2, myBias);
+    op->setDataType(DataType::Int32);
+    op->setBackend("cpu");
+    op -> computeOutputDims();
+    myCDW -> forward();
+    op -> getOutput(0) -> print();
+    REQUIRE(*(op -> getOutput(0)) == *myOutput);
+
+    // std::cout << static_cast<Tensor>((*op)["weight"])[0][0][0][0] << std::endl;
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_ConvImpl.cpp b/unit_tests/operator/Test_ConvImpl.cpp
index 5bf11810..0f46e8f6 100644
--- a/unit_tests/operator/Test_ConvImpl.cpp
+++ b/unit_tests/operator/Test_ConvImpl.cpp
@@ -20,11 +20,10 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Conv(forward)") {
+TEST_CASE("[cpu/operator] Conv(forward)", "[Conv][CPU]") {
     SECTION("Classic Conv") {
         std::shared_ptr<Node> myConv = Conv(3,4,{3,3}, "myconv");
-        myConv->getOperator()->setDataType(DataType::Int32);
-        myConv->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myConv -> getOperator());
         std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
             {
                 {
@@ -148,19 +147,20 @@ TEST_CASE("[cpu/operator] Conv(forward)") {
                 }
             }
         });
-        myConv->getOperator()->associateInput(0,myInput);
-        myConv->getOperator()->associateInput(1,myWeights);
-        myConv->getOperator()->associateInput(2,myBias);
-        myConv->getOperator()->computeOutputDims();
+        op->associateInput(0,myInput);
+        op->associateInput(1,myWeights);
+        op->associateInput(2,myBias);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myConv->forward();
-        // myConv->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
+        // op->getOutput(0)->print();
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
     SECTION("Point-wise") {
         std::shared_ptr<Node> myConv = Conv(3,4,{1,1}, "myconv", {1,1});
-        myConv->getOperator()->setDataType(DataType::Float32);
-        myConv->getOperator()->setBackend("cpu");
-        myConv->getOperator()->input(0) = Array4D<float,2,3,3,3> {
+        auto op = std::static_pointer_cast<OperatorTensor>(myConv -> getOperator());
+        op->setInput(0, std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
             {
                 {
                     {{-1.38467371F, -0.87123615F, -0.22336592F},
@@ -185,8 +185,8 @@ TEST_CASE("[cpu/operator] Conv(forward)") {
                      { 0.09811721F,  1.74225271F, -1.35267365F}}
                 }
             }
-        };
-        myConv->getOperator()->input(1) = Array4D<float,4,3,1,1> {
+        }));
+        op->setInput(1, std::make_shared<Tensor>(Array4D<float,4,3,1,1> {
             {
                 {
                     {{ 0.33669037F}},
@@ -208,8 +208,8 @@ TEST_CASE("[cpu/operator] Conv(forward)") {
                     {{ 0.80935723F}}
                 }
             }
-        };
-        myConv->getOperator()->input(2) = Array1D<float,4> {{ 1.11029029F, -1.68979895F, -0.98895991F,  0.95797181F}};
+        }));
+        op->setInput(2, std::make_shared<Tensor>(Array1D<float,4> {{ 1.11029029F, -1.68979895F, -0.98895991F,  0.95797181F}}));
         Tensor expectedOutput = Array4D<float,2,4,3,3> {
             {
                 {
@@ -242,11 +242,12 @@ TEST_CASE("[cpu/operator] Conv(forward)") {
                 }
             }
         };
-
-        myConv->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myConv->forward();
 
-        float* resPtr = static_cast<float*>(myConv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput.getImpl()->rawPtr());
         for (std::size_t i = 0; i< expectedOutput.size(); ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
diff --git a/unit_tests/operator/Test_DivImpl.cpp b/unit_tests/operator/Test_DivImpl.cpp
index 7a631b4c..16f69db9 100644
--- a/unit_tests/operator/Test_DivImpl.cpp
+++ b/unit_tests/operator/Test_DivImpl.cpp
@@ -20,7 +20,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Div(forward)") {
+TEST_CASE("[cpu/operator] Div(forward)", "[Div][CPU]") {
     SECTION("2D Tensor by Singleton") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
             {
@@ -37,14 +37,15 @@ TEST_CASE("[cpu/operator] Div(forward)") {
         });
 
         std::shared_ptr<Node> myDiv = Div();
-        myDiv->getOperator()->setDataType(DataType::Float32);
-        myDiv->getOperator()->setBackend("cpu");
-        myDiv->getOperator()->associateInput(0, input_1);
-        myDiv->getOperator()->associateInput(1, input_2);
-        myDiv->getOperator()->computeOutputDims();
-        myDiv->forward();
-
-        float* resPtr = static_cast<float*>(myDiv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
+        op -> associateInput(0, input_1);
+        op -> associateInput(1, input_2);
+        op -> setDataType(DataType::Float32);
+        op -> setBackend("cpu");
+        op -> computeOutputDims();
+        myDiv -> forward();
+
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -73,14 +74,15 @@ TEST_CASE("[cpu/operator] Div(forward)") {
         });
 
         std::shared_ptr<Node> myDiv = Div();
-        myDiv->getOperator()->setDataType(DataType::Float32);
-        myDiv->getOperator()->setBackend("cpu");
-        myDiv->getOperator()->associateInput(0, input_1);
-        myDiv->getOperator()->associateInput(1, input_2);
-        myDiv->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
+        op -> associateInput(0, input_1);
+        op -> associateInput(1, input_2);
+        op -> setDataType(DataType::Float32);
+        op -> setBackend("cpu");
+        op -> computeOutputDims();
         myDiv->forward();
 
-        float* resPtr = static_cast<float*>(myDiv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -112,14 +114,15 @@ TEST_CASE("[cpu/operator] Div(forward)") {
         });
 
         std::shared_ptr<Node> myDiv = Div();
-        myDiv->getOperator()->setDataType(DataType::Float32);
-        myDiv->getOperator()->setBackend("cpu");
-        myDiv->getOperator()->associateInput(0, input_1);
-        myDiv->getOperator()->associateInput(1, input_2);
-        myDiv->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
+        op -> associateInput(0, input_1);
+        op -> associateInput(1, input_2);
+        op -> setDataType(DataType::Float32);
+        op -> setBackend("cpu");
+        op -> computeOutputDims();
         myDiv->forward();
 
-        float* resPtr = static_cast<float*>(myDiv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 12; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -191,14 +194,15 @@ TEST_CASE("[cpu/operator] Div(forward)") {
         });
 
         std::shared_ptr<Node> myDiv = Div();
-        myDiv->getOperator()->setDataType(DataType::Float32);
-        myDiv->getOperator()->setBackend("cpu");
-        myDiv->getOperator()->associateInput(0, input_1);
-        myDiv->getOperator()->associateInput(1, input_2);
-        myDiv->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
+        op -> associateInput(0, input_1);
+        op -> associateInput(1, input_2);
+        op -> setDataType(DataType::Float32);
+        op -> setBackend("cpu");
+        op -> computeOutputDims();
         myDiv->forward();
 
-        float* resPtr = static_cast<float*>(myDiv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 54; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
diff --git a/unit_tests/operator/Test_FCImpl.cpp b/unit_tests/operator/Test_FCImpl.cpp
index 1df64893..4309ce1a 100644
--- a/unit_tests/operator/Test_FCImpl.cpp
+++ b/unit_tests/operator/Test_FCImpl.cpp
@@ -19,7 +19,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/oeprator] FC(forward)") {
+TEST_CASE("[cpu/oeprator] FC(forward)", "[FC][CPU]") {
     std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array2D<int, 5, 75>{
             {{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
               5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,  5,  6,  7,  8,
@@ -45,11 +45,10 @@ TEST_CASE("[cpu/oeprator] FC(forward)") {
     std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<int, 2, 5>{
             {{23601, 23602, 23603, 23604, 23605}, {68601, 68602, 68603, 68604, 68605}}});
 
-    std::shared_ptr<Node> myFC = FC(5, false, "myfc");
-    myFC->getOperator()->setDataType(DataType::Int32);
-    myFC->getOperator()->setBackend("cpu");
-    myFC->getOperator()->associateInput(1, myWeights);
-    myFC->getOperator()->associateInput(2, myBias);
+    std::shared_ptr<Node> myFC = FC(75, 5, false, "myfc");
+    auto op = std::static_pointer_cast<OperatorTensor>(myFC -> getOperator());
+    op -> associateInput(1, myWeights);
+    op -> associateInput(2, myBias);
 
     SECTION("2D input") {
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<int, 2, 75>{
@@ -62,10 +61,12 @@ TEST_CASE("[cpu/oeprator] FC(forward)") {
                   105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
                   120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
                   135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149}}});
-        myFC->getOperator()->associateInput(0, myInput);
-        myFC->getOperator()->computeOutputDims();
+        op->associateInput(0, myInput);
+        op -> setDataType(DataType::Int32);
+        op -> setBackend("cpu");
+        op->computeOutputDims();
         myFC->forward();
-        REQUIRE(*std::static_pointer_cast<Tensor>(myFC->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
     SECTION("4D input") {
         std::shared_ptr<Tensor> myInput =
@@ -99,10 +100,12 @@ TEST_CASE("[cpu/oeprator] FC(forward)") {
                                                                      {135, 136, 137, 138, 139},
                                                                      {140, 141, 142, 143, 144},
                                                                      {145, 146, 147, 148, 149}}}}});
-        myFC->getOperator()->associateInput(0, myInput);
-        myFC->getOperator()->computeOutputDims();
+        op->associateInput(0, myInput);
+        op -> setDataType(DataType::Int32);
+        op -> setBackend("cpu");
+        op->computeOutputDims();
         myFC->forward();
-        REQUIRE(*std::static_pointer_cast<Tensor>(myFC->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 
     // std::cout << static_cast<Tensor>((*myFC->getOperator())["weight"])[0][0][0][0] << std::endl;
diff --git a/unit_tests/operator/Test_LeakyReLUImpl.cpp b/unit_tests/operator/Test_LeakyReLUImpl.cpp
index d2590fd5..cad2a6f9 100644
--- a/unit_tests/operator/Test_LeakyReLUImpl.cpp
+++ b/unit_tests/operator/Test_LeakyReLUImpl.cpp
@@ -18,7 +18,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] LeakyReLU(forward)") {
+TEST_CASE("[cpu/operator] LeakyReLU(forward)", "[LeakyReLU][CPU]") {
     SECTION("1D Tensor") {
         std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
             {0, 1, 2,-3, 4,-5,-6, 7, 8, 9}
@@ -28,12 +28,13 @@ TEST_CASE("[cpu/operator] LeakyReLU(forward)") {
         });
 
         std::shared_ptr<Node> myLeakyReLU = LeakyReLU();
-        myLeakyReLU->getOperator()->setDataType(DataType::Int32);
-        myLeakyReLU->getOperator()->setBackend("cpu");
-        myLeakyReLU->getOperator()->associateInput(0,input0);
-        myLeakyReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myLeakyReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myLeakyReLU->forward();
-        REQUIRE(*std::static_pointer_cast<Tensor>(myLeakyReLU->getOperator()->getOutput(0)) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 
     SECTION("2D Tensor") {
@@ -51,12 +52,13 @@ TEST_CASE("[cpu/operator] LeakyReLU(forward)") {
         });
 
         std::shared_ptr<Node> myLeakyReLU = LeakyReLU();
-        myLeakyReLU->getOperator()->setDataType(DataType::Int32);
-        myLeakyReLU->getOperator()->setBackend("cpu");
-        myLeakyReLU->getOperator()->associateInput(0,input0);
-        myLeakyReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myLeakyReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myLeakyReLU->forward();
-        REQUIRE(*myLeakyReLU->getOperator()->getOutput(0) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 
     SECTION("3D Tensor") {
@@ -86,12 +88,13 @@ TEST_CASE("[cpu/operator] LeakyReLU(forward)") {
         });
 
         std::shared_ptr<Node> myLeakyReLU = LeakyReLU();
-        myLeakyReLU->getOperator()->setDataType(DataType::Int32);
-        myLeakyReLU->getOperator()->setBackend("cpu");
-        myLeakyReLU->getOperator()->associateInput(0,input0);
-        myLeakyReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myLeakyReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myLeakyReLU->forward();
-        REQUIRE(*myLeakyReLU->getOperator()->getOutput(0) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 
     SECTION("4D Tensor") {
@@ -145,12 +148,13 @@ TEST_CASE("[cpu/operator] LeakyReLU(forward)") {
         });
 
         std::shared_ptr<Node> myLeakyReLU = LeakyReLU();
-        myLeakyReLU->getOperator()->setDataType(DataType::Int32);
-        myLeakyReLU->getOperator()->setBackend("cpu");
-        myLeakyReLU->getOperator()->associateInput(0,input0);
-        myLeakyReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myLeakyReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myLeakyReLU->forward();
-        REQUIRE(*myLeakyReLU->getOperator()->getOutput(0) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 
     SECTION("Test construction attribute: negative_slop") {
@@ -162,11 +166,12 @@ TEST_CASE("[cpu/operator] LeakyReLU(forward)") {
         });
 
         std::shared_ptr<Node> myLeakyReLU = LeakyReLU(0.5f);
-        myLeakyReLU->getOperator()->setDataType(DataType::Float32);
-        myLeakyReLU->getOperator()->setBackend("cpu");
-        myLeakyReLU->getOperator()->associateInput(0,input0);
-        myLeakyReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myLeakyReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myLeakyReLU->forward();
-        REQUIRE(*myLeakyReLU->getOperator()->getOutput(0) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_MatMulImpl.cpp b/unit_tests/operator/Test_MatMulImpl.cpp
index 33808717..1edb915f 100644
--- a/unit_tests/operator/Test_MatMulImpl.cpp
+++ b/unit_tests/operator/Test_MatMulImpl.cpp
@@ -19,7 +19,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul]") {
+TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul][CPU]") {
     // Test MatMul forward with batch size = 2 and feature size = 75
     std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array2D<int, 5, 75>{
             {{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 1,  2,  3,  4,
@@ -45,10 +45,9 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul]") {
     std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array2D<int, 2, 5>{
             {{23600, 23600, 23600, 23600, 23600}, {68600, 68600, 68600, 68600, 68600}}});
 
-    std::shared_ptr<Node> myMatMul = MatMul(5, "mymatmul");
-    myMatMul->getOperator()->setDataType(DataType::Int32);
-    myMatMul->getOperator()->setBackend("cpu");
-    myMatMul->getOperator()->associateInput(1, myWeights);
+    std::shared_ptr<Node> myMatMul = MatMul(75, 5, "mymatmul");
+    auto op = std::static_pointer_cast<OperatorTensor>(myMatMul -> getOperator());
+    op->associateInput(1, myWeights);
 
     SECTION("2D input") {
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array2D<int, 2, 75>{
@@ -61,10 +60,12 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul]") {
                   105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
                   120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
                   135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149}}});
-        myMatMul->getOperator()->associateInput(0, myInput);
-        myMatMul->getOperator()->computeOutputDims();
+        op->associateInput(0, myInput);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myMatMul->forward();
-        REQUIRE(*std::static_pointer_cast<Tensor>(myMatMul->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
     SECTION("4D input") {
         std::shared_ptr<Tensor> myInput =
@@ -98,10 +99,12 @@ TEST_CASE("[cpu/operator] MatMul(forward)", "[MatMul]") {
                                                                      {135, 136, 137, 138, 139},
                                                                      {140, 141, 142, 143, 144},
                                                                      {145, 146, 147, 148, 149}}}}});
-        myMatMul->getOperator()->associateInput(0, myInput);
-        myMatMul->getOperator()->computeOutputDims();
+        op->associateInput(0, myInput);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myMatMul->forward();
-        REQUIRE(*std::static_pointer_cast<Tensor>(myMatMul->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 
     // std::cout << static_cast<Tensor>((*myMatMul->getOperator())["weight"])[0][0][0][0] << std::endl;
diff --git a/unit_tests/operator/Test_MaxPoolingImpl.cpp b/unit_tests/operator/Test_MaxPoolingImpl.cpp
index 23883c13..f954ab2c 100644
--- a/unit_tests/operator/Test_MaxPoolingImpl.cpp
+++ b/unit_tests/operator/Test_MaxPoolingImpl.cpp
@@ -21,7 +21,7 @@
 using namespace Aidge;
 
 
-TEST_CASE("[cpu/operator] MaxPooling(forward)") {
+TEST_CASE("[cpu/operator] MaxPooling(forward)", "[MaxPooling][CPU]") {
     std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<float,2,2,5,5> { //NCHW
         {
             {
@@ -54,8 +54,7 @@ TEST_CASE("[cpu/operator] MaxPooling(forward)") {
     });
     SECTION("Stride") {
         std::shared_ptr<Node> myMaxPool = MaxPooling({2,2}, "mycdw", {2,2});
-        myMaxPool->getOperator()->setDataType(DataType::Float32);
-        myMaxPool->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myMaxPool -> getOperator());
 
         std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<float,2,2,2,2> {
             {
@@ -74,9 +73,11 @@ TEST_CASE("[cpu/operator] MaxPooling(forward)") {
             }
         });
         myMaxPool->getOperator()->associateInput(0,myInput);
-        myMaxPool->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myMaxPool->forward();
-        myMaxPool->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myMaxPool->getOperator()->getOutput(0)) == *myOutput);
+        op->getOutput(0)->print();
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_MulImpl.cpp b/unit_tests/operator/Test_MulImpl.cpp
index a04a2a53..1707bc81 100644
--- a/unit_tests/operator/Test_MulImpl.cpp
+++ b/unit_tests/operator/Test_MulImpl.cpp
@@ -20,7 +20,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Mul(forward)") {
+TEST_CASE("[cpu/operator] Mul(forward)", "[Mul][CPU]") {
     SECTION("2D Tensor by Singleton") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
             {
@@ -37,14 +37,15 @@ TEST_CASE("[cpu/operator] Mul(forward)") {
         });
 
         std::shared_ptr<Node> myMul = Mul();
-        myMul->getOperator()->setDataType(DataType::Float32);
-        myMul->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
         myMul->getOperator()->associateInput(0, input_1);
         myMul->getOperator()->associateInput(1, input_2);
-        myMul->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myMul->forward();
 
-        float* resPtr = static_cast<float*>(myMul->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -73,14 +74,15 @@ TEST_CASE("[cpu/operator] Mul(forward)") {
         });
 
         std::shared_ptr<Node> myMul = Mul();
-        myMul->getOperator()->setDataType(DataType::Float32);
-        myMul->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
         myMul->getOperator()->associateInput(0, input_1);
         myMul->getOperator()->associateInput(1, input_2);
-        myMul->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myMul->forward();
 
-        float* resPtr = static_cast<float*>(myMul->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -112,14 +114,15 @@ TEST_CASE("[cpu/operator] Mul(forward)") {
         });
 
         std::shared_ptr<Node> myMul = Mul();
-        myMul->getOperator()->setDataType(DataType::Float32);
-        myMul->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
         myMul->getOperator()->associateInput(0, input_1);
         myMul->getOperator()->associateInput(1, input_2);
-        myMul->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myMul->forward();
 
-        float* resPtr = static_cast<float*>(myMul->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 12; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
diff --git a/unit_tests/operator/Test_PadImpl.cpp b/unit_tests/operator/Test_PadImpl.cpp
index 6c3b4b71..edcdaa96 100644
--- a/unit_tests/operator/Test_PadImpl.cpp
+++ b/unit_tests/operator/Test_PadImpl.cpp
@@ -20,13 +20,12 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Pad(forward)") {
+TEST_CASE("[cpu/operator] Pad(forward)", "[Pad][CPU]") {
     SECTION("Symmetric Pad") {
         const int pv = 0; // pad value
 
         std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Constant, static_cast<double>(pv));
-        myPad->getOperator()->setDataType(DataType::Int32);
-        myPad->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myPad -> getOperator());
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
             {
                 {
@@ -125,18 +124,19 @@ TEST_CASE("[cpu/operator] Pad(forward)") {
         });
 
         myPad->getOperator()->associateInput(0,myInput);
-        myPad->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myPad->forward();
         // myPad->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 
     SECTION("Asymmetric Pad") {
         const int pv = 0; // pad value
 
         std::shared_ptr<Node> myPad = Pad<2>({1, 0, 0, 1}, "mypad", PadBorderType::Constant, static_cast<double>(pv));
-        myPad->getOperator()->setDataType(DataType::Int32);
-        myPad->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myPad -> getOperator());
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
             {
                 {
@@ -229,16 +229,17 @@ TEST_CASE("[cpu/operator] Pad(forward)") {
         });
 
         myPad->getOperator()->associateInput(0,myInput);
-        myPad->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myPad->forward();
         // myPad->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 
     SECTION("Pad Edge") {
         std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Edge);
-        myPad->getOperator()->setDataType(DataType::Int32);
-        myPad->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myPad -> getOperator());
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
             {
                 {
@@ -337,16 +338,17 @@ TEST_CASE("[cpu/operator] Pad(forward)") {
         });
 
         myPad->getOperator()->associateInput(0,myInput);
-        myPad->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myPad->forward();
         // myPad->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 
     SECTION("Pad Reflect") {
         std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Reflect);
-        myPad->getOperator()->setDataType(DataType::Int32);
-        myPad->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myPad -> getOperator());
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
             {
                 {
@@ -453,16 +455,17 @@ TEST_CASE("[cpu/operator] Pad(forward)") {
         });
 
         myPad->getOperator()->associateInput(0,myInput);
-        myPad->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myPad->forward();
-         myPad->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
+        op->getOutput(0)->print();
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 
     SECTION("Pad Wrap") {
         std::shared_ptr<Node> myPad = Pad<2>({1, 1, 1, 1}, "mypad", PadBorderType::Wrap);
-        myPad->getOperator()->setDataType(DataType::Int32);
-        myPad->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myPad -> getOperator());
         std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW
             {
                 {
@@ -561,9 +564,11 @@ TEST_CASE("[cpu/operator] Pad(forward)") {
         });
 
         myPad->getOperator()->associateInput(0,myInput);
-        myPad->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myPad->forward();
         // myPad->getOperator()->getOutput(0)->print();
-        REQUIRE(*(myPad->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_PaddedConv.cpp b/unit_tests/operator/Test_PaddedConv.cpp
index 9a9d40b7..3baf0a7a 100644
--- a/unit_tests/operator/Test_PaddedConv.cpp
+++ b/unit_tests/operator/Test_PaddedConv.cpp
@@ -21,11 +21,10 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] PaddedConv(forward)") {
+TEST_CASE("[cpu/operator] PaddedConv(forward)", "[PaddedConv][CPU]") {
     SECTION("Classic Conv") {
         std::shared_ptr<Node> myConv = PaddedConv(3,4,{3,3}, "myconv");
-        myConv->getOperator()->setDataType(DataType::Int32);
-        myConv->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myConv -> getOperator());
         std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
             {
                 {
@@ -153,15 +152,16 @@ TEST_CASE("[cpu/operator] PaddedConv(forward)") {
         myConv->getOperator()->associateInput(0,myInput);
         myConv->getOperator()->associateInput(1,myWeights);
         myConv->getOperator()->associateInput(2,myBias);
-        myConv->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myConv->forward();
 
-        REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
     SECTION("test Padding") {
         std::shared_ptr<Node> myConv = PaddedConv(3,4,{3,3}, "myconv", {1,1}, {1,1,1,1});
-        myConv->getOperator()->setDataType(DataType::Int32);
-        myConv->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(myConv -> getOperator());
         std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> {
             {
                 {
@@ -311,9 +311,11 @@ TEST_CASE("[cpu/operator] PaddedConv(forward)") {
         myConv->getOperator()->associateInput(0,myInput);
         myConv->getOperator()->associateInput(1,myWeights);
         myConv->getOperator()->associateInput(2,myBias);
-        myConv->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myConv->forward();
 
-        REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput);
+        REQUIRE(*(op->getOutput(0)) == *myOutput);
     }
 }
diff --git a/unit_tests/operator/Test_PowImpl.cpp b/unit_tests/operator/Test_PowImpl.cpp
index 0b3595f4..0c95e785 100644
--- a/unit_tests/operator/Test_PowImpl.cpp
+++ b/unit_tests/operator/Test_PowImpl.cpp
@@ -20,7 +20,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Pow(forward)") {
+TEST_CASE("[cpu/operator] Pow(forward)", "[Pow][CPU]") {
     SECTION("2D Tensor by Singleton") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
             {
@@ -37,14 +37,15 @@ TEST_CASE("[cpu/operator] Pow(forward)") {
         });
 
         std::shared_ptr<Node> myPow = Pow();
-        myPow->getOperator()->setDataType(DataType::Float32);
-        myPow->getOperator()->setBackend("cpu");
-        myPow->getOperator()->associateInput(0, input_1);
-        myPow->getOperator()->associateInput(1, input_2);
-        myPow->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
+        op->associateInput(0, input_1);
+        op->associateInput(1, input_2);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myPow->forward();
 
-        float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -76,14 +77,15 @@ TEST_CASE("[cpu/operator] Pow(forward)") {
         });
 
         std::shared_ptr<Node> myPow = Pow();
-        myPow->getOperator()->setDataType(DataType::Float32);
-        myPow->getOperator()->setBackend("cpu");
-        myPow->getOperator()->associateInput(0, input_1);
-        myPow->getOperator()->associateInput(1, input_2);
-        myPow->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
+        op->associateInput(0, input_1);
+        op->associateInput(1, input_2);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myPow->forward();
 
-        float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 12; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -112,14 +114,15 @@ TEST_CASE("[cpu/operator] Pow(forward)") {
         });
 
         std::shared_ptr<Node> myPow = Pow();
-        myPow->getOperator()->setDataType(DataType::Float32);
-        myPow->getOperator()->setBackend("cpu");
-        myPow->getOperator()->associateInput(0, input_1);
-        myPow->getOperator()->associateInput(1, input_2);
-        myPow->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
+        op->associateInput(0, input_1);
+        op->associateInput(1, input_2);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myPow->forward();
 
-        float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -187,14 +190,15 @@ TEST_CASE("[cpu/operator] Pow(forward)") {
         });
 
         std::shared_ptr<Node> myPow = Pow();
-        myPow->getOperator()->setDataType(DataType::Float32);
-        myPow->getOperator()->setBackend("cpu");
-        myPow->getOperator()->associateInput(0, input_1);
-        myPow->getOperator()->associateInput(1, input_2);
-        myPow->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
+        op->associateInput(0, input_1);
+        op->associateInput(1, input_2);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myPow->forward();
 
-        float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 54; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
diff --git a/unit_tests/operator/Test_ReLUImpl.cpp b/unit_tests/operator/Test_ReLUImpl.cpp
index 286ff084..c4166ac4 100644
--- a/unit_tests/operator/Test_ReLUImpl.cpp
+++ b/unit_tests/operator/Test_ReLUImpl.cpp
@@ -21,7 +21,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] ReLU(forward)") {
+TEST_CASE("[cpu/operator] ReLU(forward)", "[ReLU][CPU]") {
     SECTION("1D Tensor") {
         std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
             {0, 1, 2,-3, 4,-5,-6, 7, 8, 9}
@@ -31,12 +31,13 @@ TEST_CASE("[cpu/operator] ReLU(forward)") {
         });
 
         std::shared_ptr<Node> myReLU = ReLU();
-        myReLU->getOperator()->setDataType(DataType::Int32);
-        myReLU->getOperator()->setBackend("cpu");
-        myReLU->getOperator()->associateInput(0,input0);
-        myReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myReLU->forward();
-        REQUIRE(*(myReLU->getOperator()->getOutput(0)) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 
     SECTION("2D Tensor") {
@@ -54,12 +55,13 @@ TEST_CASE("[cpu/operator] ReLU(forward)") {
         });
 
         std::shared_ptr<Node> myReLU = ReLU();
-        myReLU->getOperator()->setDataType(DataType::Int32);
-        myReLU->getOperator()->setBackend("cpu");
-        myReLU->getOperator()->associateInput(0,input0);
-        myReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myReLU->forward();
-        REQUIRE(*myReLU->getOperator()->getOutput(0) == *expectedOutput);
+        REQUIRE(*op->getOutput(0) == *expectedOutput);
     }
 
     SECTION("3D Tensor") {
@@ -89,12 +91,13 @@ TEST_CASE("[cpu/operator] ReLU(forward)") {
         });
 
         std::shared_ptr<Node> myReLU = ReLU();
-        myReLU->getOperator()->setDataType(DataType::Int32);
-        myReLU->getOperator()->setBackend("cpu");
-        myReLU->getOperator()->associateInput(0,input0);
-        myReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myReLU->forward();
-        REQUIRE(*(myReLU->getOperator()->getOutput(0)) == *expectedOutput);
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
     }
 
     SECTION("4D Tensor") {
@@ -148,11 +151,12 @@ TEST_CASE("[cpu/operator] ReLU(forward)") {
         });
 
         std::shared_ptr<Node> myReLU = ReLU();
-        myReLU->getOperator()->setDataType(DataType::Int32);
-        myReLU->getOperator()->setBackend("cpu");
-        myReLU->getOperator()->associateInput(0,input0);
-        myReLU->getOperator()->computeOutputDims();
+        auto op = std::static_pointer_cast<OperatorTensor>(myReLU -> getOperator());
+        op->associateInput(0,input0);
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         myReLU->forward();
-        REQUIRE(*myReLU->getOperator()->getOutput(0) == *expectedOutput);
+        REQUIRE(*op->getOutput(0) == *expectedOutput);
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_SliceImpl.cpp b/unit_tests/operator/Test_SliceImpl.cpp
index bc129dae..c291268a 100644
--- a/unit_tests/operator/Test_SliceImpl.cpp
+++ b/unit_tests/operator/Test_SliceImpl.cpp
@@ -18,7 +18,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Slice(forward)", "[Slice]") {
+TEST_CASE("[cpu/operator] Slice(forward)", "[Slice][CPU]") {
     SECTION("1D Tensor") {
         std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<int,10> {
             {0, 1, 2,-3, 4,-5,-6, 7, 8, 9}
@@ -28,15 +28,16 @@ TEST_CASE("[cpu/operator] Slice(forward)", "[Slice]") {
         });
 
         std::shared_ptr<Node> mySlice = Slice(0, {4});
-        mySlice->getOperator()->setDatatype(DataType::Int32);
-        mySlice->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
         mySlice->getOperator()->associateInput(0,input0);
-        mySlice->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         mySlice->forward();
         // mySlice->getOperator()->output(0).print();
-        REQUIRE(mySlice->getOperator()->output(0) == *expectedOutput);
-        REQUIRE(mySlice->getOperator()->output(0).dims() == expectedOutput->dims());
-        REQUIRE(mySlice->getOperator()->output(0).dataType() == expectedOutput->dataType());
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->output(0).dims() == expectedOutput->dims());
+        REQUIRE(op->output(0).dataType() == expectedOutput->dataType());
     }
 
     SECTION("2D Tensor") {
@@ -54,15 +55,16 @@ TEST_CASE("[cpu/operator] Slice(forward)", "[Slice]") {
         });
 
         std::shared_ptr<Node> mySlice = Slice(5, {2,3});
-        mySlice->getOperator()->setDatatype(DataType::Int32);
-        mySlice->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
         mySlice->getOperator()->associateInput(0,input0);
-        mySlice->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         mySlice->forward();
         // mySlice->getOperator()->output(0).print();
-        REQUIRE(*mySlice->getOperator()->getOutput(0) == *expectedOutput);
-        REQUIRE(mySlice->getOperator()->output(0).dims() == expectedOutput->dims());
-        REQUIRE(mySlice->getOperator()->output(0).dataType() == expectedOutput->dataType());
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->output(0).dims() == expectedOutput->dims());
+        REQUIRE(op->output(0).dataType() == expectedOutput->dataType());
     }
 
     SECTION("3D Tensor") {
@@ -87,15 +89,16 @@ TEST_CASE("[cpu/operator] Slice(forward)", "[Slice]") {
         });
 
         std::shared_ptr<Node> mySlice = Slice(14, {1,1,3});
-        mySlice->getOperator()->setDatatype(DataType::Int32);
-        mySlice->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
         mySlice->getOperator()->associateInput(0,input0);
-        mySlice->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         mySlice->forward();
         // mySlice->getOperator()->output(0).print();
-        REQUIRE(mySlice->getOperator()->output(0) == *expectedOutput);
-        REQUIRE(mySlice->getOperator()->output(0).dims() == expectedOutput->dims());
-        REQUIRE(mySlice->getOperator()->output(0).dataType() == expectedOutput->dataType());
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->output(0).dims() == expectedOutput->dims());
+        REQUIRE(op->output(0).dataType() == expectedOutput->dataType());
     }
 
     SECTION("4D Tensor") {
@@ -149,14 +152,15 @@ TEST_CASE("[cpu/operator] Slice(forward)", "[Slice]") {
         });
 
         std::shared_ptr<Node> mySlice = Slice(0, {2,2,2,10});
-        mySlice->getOperator()->setDatatype(DataType::Int32);
-        mySlice->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(mySlice -> getOperator());
         mySlice->getOperator()->associateInput(0,input0);
-        mySlice->getOperator()->computeOutputDims();
+        op->setDataType(DataType::Int32);
+        op->setBackend("cpu");
+        op->computeOutputDims();
         mySlice->forward();
         // mySlice->getOperator()->output(0).print();
-        REQUIRE(mySlice->getOperator()->output(0) == *expectedOutput);
-        REQUIRE(mySlice->getOperator()->output(0).dims() == expectedOutput->dims());
-        REQUIRE(mySlice->getOperator()->output(0).dataType() == expectedOutput->dataType());
+        REQUIRE(*(op->getOutput(0)) == *expectedOutput);
+        REQUIRE(op->output(0).dims() == expectedOutput->dims());
+        REQUIRE(op->output(0).dataType() == expectedOutput->dataType());
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_SoftmaxImpl.cpp b/unit_tests/operator/Test_SoftmaxImpl.cpp
index 44337e37..3d3c9fe4 100644
--- a/unit_tests/operator/Test_SoftmaxImpl.cpp
+++ b/unit_tests/operator/Test_SoftmaxImpl.cpp
@@ -20,7 +20,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Softmax(forward)") {
+TEST_CASE("[cpu/operator] Softmax(forward)", "[Softmax][CPU]") {
     SECTION("2D Tensor") {
         std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<float,2,10> {
             {
@@ -40,13 +40,14 @@ TEST_CASE("[cpu/operator] Softmax(forward)") {
         });
 
         std::shared_ptr<Node> mySoftmax = Softmax();
+        auto op = std::static_pointer_cast<OperatorTensor>(mySoftmax -> getOperator());
+        mySoftmax->getOperator()->associateInput(0,input);
         mySoftmax->getOperator()->setDataType(DataType::Float32);
         mySoftmax->getOperator()->setBackend("cpu");
-        mySoftmax->getOperator()->associateInput(0,input);
-        mySoftmax->getOperator()->computeOutputDims();
+        op->computeOutputDims();
         mySoftmax->forward();
 
-        float* resPtr = static_cast<float*>(mySoftmax->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 20; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -108,13 +109,14 @@ TEST_CASE("[cpu/operator] Softmax(forward)") {
         });
 
         std::shared_ptr<Node> mySoftmax = Softmax();
+        auto op = std::static_pointer_cast<OperatorTensor>(mySoftmax -> getOperator());
+        mySoftmax->getOperator()->associateInput(0,input);
         mySoftmax->getOperator()->setDataType(DataType::Float32);
         mySoftmax->getOperator()->setBackend("cpu");
-        mySoftmax->getOperator()->associateInput(0,input);
-        mySoftmax->getOperator()->computeOutputDims();
+        op->computeOutputDims();
         mySoftmax->forward();
 
-        float* resPtr = static_cast<float*>(mySoftmax->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 54; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
diff --git a/unit_tests/operator/Test_SqrtImpl.cpp b/unit_tests/operator/Test_SqrtImpl.cpp
index ceb57e6f..653ecf0d 100644
--- a/unit_tests/operator/Test_SqrtImpl.cpp
+++ b/unit_tests/operator/Test_SqrtImpl.cpp
@@ -20,7 +20,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Sqrt(forward)") {
+TEST_CASE("[cpu/operator] Sqrt(forward)", "[Sqrt][CPU]") {
     SECTION("2D Tensor") {
         std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array2D<float,2,2> {
             {
@@ -36,13 +36,14 @@ TEST_CASE("[cpu/operator] Sqrt(forward)") {
         });
 
         std::shared_ptr<Node> mySqrt = Sqrt();
+        auto op = std::static_pointer_cast<OperatorTensor>(mySqrt -> getOperator());
+        mySqrt->getOperator()->associateInput(0,input);
         mySqrt->getOperator()->setDataType(DataType::Float32);
         mySqrt->getOperator()->setBackend("cpu");
-        mySqrt->getOperator()->associateInput(0,input);
-        mySqrt->getOperator()->computeOutputDims();
+        op->computeOutputDims();
         mySqrt->forward();
 
-        float* resPtr = static_cast<float*>(mySqrt->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -106,13 +107,14 @@ TEST_CASE("[cpu/operator] Sqrt(forward)") {
         });
 
         std::shared_ptr<Node> mySqrt = Sqrt();
+        auto op = std::static_pointer_cast<OperatorTensor>(mySqrt -> getOperator());
+        mySqrt->getOperator()->associateInput(0,input);
         mySqrt->getOperator()->setDataType(DataType::Float32);
         mySqrt->getOperator()->setBackend("cpu");
-        mySqrt->getOperator()->associateInput(0,input);
-        mySqrt->getOperator()->computeOutputDims();
+        op->computeOutputDims();
         mySqrt->forward();
 
-        float* resPtr = static_cast<float*>(mySqrt->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 54; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
diff --git a/unit_tests/operator/Test_SubImpl.cpp b/unit_tests/operator/Test_SubImpl.cpp
index 4ef01280..dfd64078 100644
--- a/unit_tests/operator/Test_SubImpl.cpp
+++ b/unit_tests/operator/Test_SubImpl.cpp
@@ -20,7 +20,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("[cpu/operator] Sub(forward)") {
+TEST_CASE("[cpu/operator] Sub(forward)", "[Sub][CPU]") {
     SECTION("2D Tensor by Singleton") {
         std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
             {
@@ -37,14 +37,15 @@ TEST_CASE("[cpu/operator] Sub(forward)") {
         });
 
         std::shared_ptr<Node> mySub = Sub();
-        mySub->getOperator()->setDataType(DataType::Float32);
-        mySub->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
         mySub->getOperator()->associateInput(0, input_1);
         mySub->getOperator()->associateInput(1, input_2);
-        mySub->getOperator()->computeOutputDims();
+        mySub->getOperator()->setDataType(DataType::Float32);
+        mySub->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         mySub->forward();
 
-        float* resPtr = static_cast<float*>(mySub->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -73,14 +74,15 @@ TEST_CASE("[cpu/operator] Sub(forward)") {
         });
 
         std::shared_ptr<Node> mySub = Sub();
-        mySub->getOperator()->setDataType(DataType::Float32);
-        mySub->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
         mySub->getOperator()->associateInput(0, input_1);
         mySub->getOperator()->associateInput(1, input_2);
-        mySub->getOperator()->computeOutputDims();
+        mySub->getOperator()->setDataType(DataType::Float32);
+        mySub->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         mySub->forward();
 
-        float* resPtr = static_cast<float*>(mySub->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 4; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
@@ -112,14 +114,15 @@ TEST_CASE("[cpu/operator] Sub(forward)") {
         });
 
         std::shared_ptr<Node> mySub = Sub();
-        mySub->getOperator()->setDataType(DataType::Float32);
-        mySub->getOperator()->setBackend("cpu");
+        auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
         mySub->getOperator()->associateInput(0, input_1);
         mySub->getOperator()->associateInput(1, input_2);
-        mySub->getOperator()->computeOutputDims();
+        mySub->getOperator()->setDataType(DataType::Float32);
+        mySub->getOperator()->setBackend("cpu");
+        op->computeOutputDims();
         mySub->forward();
 
-        float* resPtr = static_cast<float*>(mySub->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* resPtr = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
         float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
         for (std::size_t i = 0; i< 12; ++i) {
             REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
-- 
GitLab