diff --git a/unit_tests/operator/Test_MulImpl.cpp b/unit_tests/operator/Test_MulImpl.cpp
index 7518bd18a62c391f238fbecec6d91391867e3e57..2937e94938c671140eeeee87d47d5c48f685203e 100644
--- a/unit_tests/operator/Test_MulImpl.cpp
+++ b/unit_tests/operator/Test_MulImpl.cpp
@@ -9,24 +9,29 @@
  *
  ********************************************************************************/
 
-#include <catch2/catch_test_macros.hpp>
 #include <chrono>
 #include <cstddef> // std::size_t
 #include <cstdint> // std::uint16_t
-#include <iostream>
 #include <memory>
 #include <numeric> // std::accumulate
-#include <random> // std::random_device, std::mt19937, std::uniform_real_distribution
+#include <random>  // std::random_device, std::mt19937, std::uniform_real_distribution,
+                   // std::uniform_int_distribution
+
+#include <catch2/catch_test_macros.hpp>
 
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
+#include "aidge/backend/cpu/operator/MulImpl.hpp"
+#include "aidge/data/DataType.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Mul.hpp"
+#include "aidge/utils/ArrayHelpers.hpp"
+#include "aidge/utils/Log.hpp"
 #include "aidge/utils/TensorUtils.hpp"
 
 namespace Aidge {
 
 TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
-    std::shared_ptr<Node> myMul = Mul();
-    auto op = std::static_pointer_cast<OperatorTensor>(myMul->getOperator());
+    std::shared_ptr<Mul_Op> op = std::make_shared<Mul_Op>();
     op->setDataType(DataType::Float32);
     op->setBackend("cpu");
 
@@ -34,19 +39,10 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
 
     SECTION("Case 1: 1D and 2D Tensors") {
         const auto T0 = std::make_shared<Tensor>(
-            Array2D<float, 2, 3>({{{1, 2, 3}, {4, 5, 6}}}));
+            Array2D<cpptype_t<DataType::Float32>, 2, 3>({{{1, 2, 3}, {4, 5, 6}}}));
 
         const auto T1 =
-            std::make_shared<Tensor>(Array1D<float, 3>({0.1, 0.2, 0.3}));
-
-        float *input0 = static_cast<float *>(T0->getImpl()->rawPtr());
-        float *input1 = static_cast<float *>(T1->getImpl()->rawPtr());
-
-        // TODO Use
-        T0->setDataType(DataType::Float32);
-        T0->setBackend("cpu");
-        T1->setDataType(DataType::Float32);
-        T1->setBackend("cpu");
+            std::make_shared<Tensor>(Array1D<cpptype_t<DataType::Float32>, 3>({0.1, 0.2, 0.3}));
 
         op->associateInput(0, T0);
         op->associateInput(1, T1);
@@ -54,16 +50,15 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
             Array2D<float, 2, 3>({{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}})));
         op->forwardDims();
 
-        myMul->backward();
+        op->backward();
 
-        const auto expectedGrad0 = std::make_shared<Tensor>(
-            Array2D<float, 2, 3>({{{0.1, 0.2, 0.3}, {0.1, 0.2, 0.3}}}));
+        const Tensor expectedGrad0 =
+            Array2D<cpptype_t<DataType::Float32>, 2, 3>({{{0.1, 0.2, 0.3}, {0.1, 0.2, 0.3}}});
 
-        const auto expectedGrad1 =
-            std::make_shared<Tensor>(Array1D<float, 3>({5, 7, 9}));
+        const Tensor expectedGrad1 = Array1D<cpptype_t<DataType::Float32>, 3>({5, 7, 9});
 
-        REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *expectedGrad0));
-        REQUIRE(approxEq<float>(*(op->getInput(1)->grad()), *expectedGrad1));
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(1)->grad()), expectedGrad1));
     }
 
     SECTION("Case 2: 3D and 1D tensors") {
@@ -77,31 +72,25 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
         const auto newGrad = std::make_shared<Tensor>(Array3D<float, 2, 2, 3>(
             {{{{1, 1, 1}, {1, 1, 1}}, {{1, 1, 1}, {1, 1, 1}}}}));
 
-        const auto expectedGrad0 = std::make_shared<Tensor>(
+        const Tensor expectedGrad0 =
             Array3D<float, 2, 2, 3>({{{{0.3, 0.2, 0.1}, {0.3, 0.2, 0.1}},
-                                      {{0.3, 0.2, 0.1}, {0.3, 0.2, 0.1}}}}));
-
-        const auto expectedGrad1 =
-            std::make_shared<Tensor>(Array1D<float, 3>({22.0, 26.0, 30.0}));
+                                      {{0.3, 0.2, 0.1}, {0.3, 0.2, 0.1}}}});
 
-        for (auto T : {T0, T1, newGrad, expectedGrad0, expectedGrad1}) {
-            T->setBackend("cpu");
-            T->setDataType(DataType::Float32);
-        }
+        const Tensor expectedGrad1 = Array1D<cpptype_t<DataType::Float32>, 3>({22.0, 26.0, 30.0});
 
         op->associateInput(0, T0);
         op->associateInput(1, T1);
         op->getOutput(0)->setGrad(newGrad);
         op->forwardDims();
 
-        myMul->backward();
+        op->backward();
 
-        REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *expectedGrad0));
-        REQUIRE(approxEq<float>(*(op->getInput(1)->grad()), *expectedGrad1));
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(1)->grad()), expectedGrad1));
     }
 
     SECTION("Case 3: 4D and 2D tensors") {
-        const auto T0 = std::make_shared<Tensor>(Array4D<float, 2, 2, 3, 3>(
+        const auto T0 = std::make_shared<Tensor>(Array4D<cpptype_t<DataType::Float32>, 2, 2, 3, 3>(
             {{{{{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}, {7.0, 8.0, 9.0}},
                {{10.0, 11.0, 12.0}, {13.0, 14.0, 15.0}, {16.0, 17.0, 18.0}}},
               {{{19.0, 20.0, 21.0}, {22.0, 23.0, 24.0}, {25.0, 26.0, 27.0}},
@@ -109,42 +98,37 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
                 {31.0, 32.0, 33.0},
                 {34.0, 35.0, 36.0}}}}}));
 
-        const auto T1 = std::make_shared<Tensor>(Array2D<float, 3, 3>(
+        const auto T1 = std::make_shared<Tensor>(Array2D<cpptype_t<DataType::Float32>, 3, 3>(
             {{{0.5, 0.3, 0.1}, {0.4, 0.2, 0.6}, {0.7, 0.8, 0.9}}}));
 
         const auto newGrad =
-            std::make_shared<Tensor>(Array4D<float, 2, 2, 3, 3>(
+            std::make_shared<Tensor>(Array4D<cpptype_t<DataType::Float32>, 2, 2, 3, 3>(
                 {{{{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}},
                    {{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}},
                   {{{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}},
                    {{1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}, {1.0, 1.0, 1.0}}}}}));
 
-        const auto expectedGrad0 =
-            std::make_shared<Tensor>(Array4D<float, 2, 2, 3, 3>(
+        const Tensor expectedGrad0 =
+            Array4D<cpptype_t<DataType::Float32>, 2, 2, 3, 3>(
                 {{{{{0.5, 0.3, 0.1}, {0.4, 0.2, 0.6}, {0.7, 0.8, 0.9}},
                    {{0.5, 0.3, 0.1}, {0.4, 0.2, 0.6}, {0.7, 0.8, 0.9}}},
                   {{{0.5, 0.3, 0.1}, {0.4, 0.2, 0.6}, {0.7, 0.8, 0.9}},
-                   {{0.5, 0.3, 0.1}, {0.4, 0.2, 0.6}, {0.7, 0.8, 0.9}}}}}));
+                   {{0.5, 0.3, 0.1}, {0.4, 0.2, 0.6}, {0.7, 0.8, 0.9}}}}});
 
-        const auto expectedGrad1 = std::make_shared<Tensor>(
-            Array2D<float, 3, 3>({{{58.0, 62.0, 66.0},
+        const Tensor expectedGrad1 =
+            Array2D<cpptype_t<DataType::Float32>, 3, 3>({{{58.0, 62.0, 66.0},
                                    {70.0, 74.0, 78.0},
-                                   {82.0, 86.0, 90.0}}}));
-
-        for (const auto T : {T0, T1, newGrad, expectedGrad0, expectedGrad1}) {
-            T->setBackend("cpu");
-            T->setDataType(DataType::Float32);
-        }
+                                   {82.0, 86.0, 90.0}}});
 
         op->associateInput(0, T0);
         op->associateInput(1, T1);
         op->getOutput(0)->setGrad(newGrad);
         op->forwardDims();
 
-        myMul->backward();
+        op->backward();
 
-        REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *expectedGrad0));
-        REQUIRE(approxEq<float>(*(op->getInput(1)->grad()), *expectedGrad1));
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(1)->grad()), expectedGrad1));
     }
 
     SECTION("Case 4: 3D and 2D tensors") {
@@ -161,12 +145,12 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
                                       }}}));
 
         const auto T1 = std::make_shared<Tensor>(
-            Array2D<float, 3, 4>({{{0.1, 0.2, 0.3, 0.4},
+            Array2D<cpptype_t<DataType::Float32>, 3, 4>({{{0.1, 0.2, 0.3, 0.4},
                                    {0.5, 0.6, 0.7, 0.8},
                                    {0.9, 1.0, 1.1, 1.2}}}));
 
         const auto newGrad = std::make_shared<Tensor>(
-            Array3D<float, 2, 3, 4>({{{
+            Array3D<cpptype_t<DataType::Float32>, 2, 3, 4>({{{
                                           {1.0, 1.0, 1.0, 1.0},
                                           {1.0, 1.0, 1.0, 1.0},
                                           {1.0, 1.0, 1.0, 1.0},
@@ -177,81 +161,68 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
                                           {1.0, 1.0, 1.0, 1.0},
                                       }}}));
 
-        const auto expectedGrad0 = std::make_shared<Tensor>(
-            Array3D<float, 2, 3, 4>({{{{0.1, 0.2, 0.3, 0.4},
+        const Tensor expectedGrad0 =
+            Array3D<cpptype_t<DataType::Float32>, 2, 3, 4>({{{{0.1, 0.2, 0.3, 0.4},
                                        {0.5, 0.6, 0.7, 0.8},
                                        {0.9, 1.0, 1.1, 1.2}},
                                       {{0.1, 0.2, 0.3, 0.4},
                                        {0.5, 0.6, 0.7, 0.8},
-                                       {0.9, 1.0, 1.1, 1.2}}}}));
+                                       {0.9, 1.0, 1.1, 1.2}}}});
 
-        const auto expectedGrad1 = std::make_shared<Tensor>(
-            Array2D<float, 3, 4>({{{14.0, 16.0, 18.0, 20.0},
+        const Tensor expectedGrad1 =
+            Array2D<cpptype_t<DataType::Float32>, 3, 4>({{{14.0, 16.0, 18.0, 20.0},
                                    {22.0, 24.0, 26.0, 28.0},
-                                   {30.0, 32.0, 34.0, 36.0}}}));
-
-        for (const auto T : {T0, T1, newGrad, expectedGrad0, expectedGrad1}) {
-            T->setBackend("cpu");
-            T->setDataType(DataType::Float32);
-        }
+                                   {30.0, 32.0, 34.0, 36.0}}});
 
         op->associateInput(0, T0);
         op->associateInput(1, T1);
         op->getOutput(0)->setGrad(newGrad);
         op->forwardDims();
 
-        myMul->backward();
+        op->backward();
 
-        REQUIRE(approxEq<float>(*(op->getInput(0)->grad()), *expectedGrad0));
-        REQUIRE(approxEq<float>(*(op->getInput(1)->grad()), *expectedGrad1));
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(0)->grad()), expectedGrad0));
+        REQUIRE(approxEq<cpptype_t<DataType::Float32>>(*(op->getInput(1)->grad()), expectedGrad1));
     }
 
     SECTION("Case 5: Tensors with random values") {
 
         // Use random values
-        std::vector<std::size_t> dims0 = {5, 2, 1, 7}; // First tensor
-        std::vector<std::size_t> dims1 = {2, 6, 7};    // Second tensor
-        std::vector<std::size_t> outputDims = {5, 2, 6, 7};
-
-        const auto input0Size = 5 * 2 * 1 * 7;
-        const auto input1Size = 2 * 6 * 7;
-        const auto outputSize = 5 * 2 * 6 * 7;
+        const std::vector<std::size_t> dims0 = {5, 2, 1, 7}; // First tensor
+        const std::vector<std::size_t> dims1 = {2, 6, 7};    // Second tensor
+        const std::vector<std::size_t> outputDims = {5, 2, 6, 7};
 
         std::random_device rd;
         std::mt19937 gen(rd());
         std::uniform_real_distribution<float> dist(0.1f, 1.0f);
 
-        std::vector<float> input0Data(input0Size);
-        std::vector<float> input1Data(input1Size);
-
-        // Fill with random values
-        for (auto &val : input0Data) {
-            val = dist(gen);
-        }
-        for (auto &val : input1Data) {
-            val = dist(gen);
-        }
-
-        auto T0 = std::make_shared<Tensor>();
-        auto T1 = std::make_shared<Tensor>();
-
+        auto T0 = std::make_shared<Tensor>(dims0);
         T0->setDataType(DataType::Float32);
         T0->setBackend("cpu");
-        T0->resize(dims0);
-        T0->getImpl()->setRawPtr(input0Data.data(), input0Size);
+        float* input0Data = static_cast<float*>(T0->getImpl()->rawPtr());
+        // Fill with random values
+        for (std::size_t i = 0; i < T0->size(); ++i) {
+            input0Data[i] = dist(gen);
+        }
 
+        auto T1 = std::make_shared<Tensor>(dims1);
         T1->setDataType(DataType::Float32);
         T1->setBackend("cpu");
-        T1->resize(dims1);
-        T1->getImpl()->setRawPtr(input1Data.data(), input1Size);
+        float* input1Data = static_cast<float*>(T1->getImpl()->rawPtr());
+        // Fill with random values
+        for (std::size_t i = 0; i < T1->size(); ++i) {
+            input1Data[i] = dist(gen);
+        }
 
         op->associateInput(0, T0);
         op->associateInput(1, T1);
 
         op->forwardDims();
-        myMul->forward();
+        op->forward();
 
-        std::vector<float> expectedOutput(outputSize);
+        Tensor expectedOutput{outputDims};
+        expectedOutput.setBackend("cpu");
+        float* expectedOutputData = static_cast<float*>(expectedOutput.getImpl()->rawPtr());
 
         for (std::size_t n = 0; n < 5; ++n) {
             for (std::size_t c = 0; c < 2; ++c) {
@@ -263,8 +234,7 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
                         std::size_t in1Idx =
                             w + 7 * (h + 6 * c);           // no n dimension
 
-                        expectedOutput[outIdx] =
-                            input0Data[in0Idx] * input1Data[in1Idx];
+                        expectedOutputData[outIdx] = input0Data[in0Idx] * input1Data[in1Idx];
                     }
                 }
             }
@@ -272,18 +242,10 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
 
         auto outputTensor = op->getOutput(0);
 
-        // Verify forward pass
-        auto expectedOutputTensor = std::make_shared<Tensor>();
-        expectedOutputTensor->resize(outputDims);
-        expectedOutputTensor->setBackend("cpu");
-        expectedOutputTensor->setDataType(DataType::Float32);
-        expectedOutputTensor->getImpl()->setRawPtr(expectedOutput.data(),
-                                                     expectedOutput.size());
-
-        REQUIRE(approxEq<float>(*outputTensor, *expectedOutputTensor));
+        REQUIRE(approxEq<float>(*outputTensor, expectedOutput));
 
         // Backward pass
-        std::vector<float> gradOutputData(outputSize);
+        std::vector<float> gradOutputData(expectedOutput.size());
         for (auto &val : gradOutputData) {
             val = dist(gen);
         }
@@ -291,11 +253,11 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
         op->getOutput(0)->setGrad(std::make_shared<Tensor>());
         op->getOutput(0)->grad()->resize(outputDims);
         op->getOutput(0)->grad()->getImpl()->setRawPtr(gradOutputData.data(),
-                                                       outputSize);
+                                                       expectedOutput.size());
 
         // Compute reference gradients
-        std::vector<float> expectedGrad0(input0Size, 0.0f);
-        std::vector<float> expectedGrad1(input1Size, 0.0f);
+        std::vector<float> expectedGrad0(T0->size(), 0.0f);
+        std::vector<float> expectedGrad1(T1->size(), 0.0f);
 
         for (std::size_t n = 0; n < 5; ++n) {
             for (std::size_t c = 0; c < 2; ++c) {
@@ -318,7 +280,7 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
         }
 
         // Perform backward pass
-        myMul->backward();
+        op->backward();
 
         auto expectedGrad0Tensor = std::make_shared<Tensor>();
         expectedGrad0Tensor->resize(T0->dims());
@@ -327,8 +289,7 @@ TEST_CASE("[CPU/Operator] Mul(Backward)", "[Mul][CPU][Backward]") {
         expectedGrad0Tensor->getImpl()->setRawPtr(expectedGrad0.data(),
                                                     expectedGrad0.size());
 
-        auto expectedGrad1Tensor = std::make_shared<Tensor>();
-        expectedGrad1Tensor->resize(T1->dims());
+        auto expectedGrad1Tensor = std::make_shared<Tensor>(T1->dims());
         expectedGrad1Tensor->setBackend("cpu");
         expectedGrad1Tensor->setDataType(DataType::Float32);
         expectedGrad1Tensor->getImpl()->setRawPtr(expectedGrad1.data(),
@@ -365,8 +326,7 @@ TEST_CASE("[cpu/operator] Mul(forward)", "[Mul][CPU]") {
                                                           std::size_t(3));
     std::uniform_int_distribution<int> boolDist(0, 1);
 
-    std::shared_ptr<Node> myMul = Mul();
-    auto op = std::static_pointer_cast<OperatorTensor>(myMul->getOperator());
+    std::shared_ptr<Mul_Op> op = std::make_shared<Mul_Op>();
     op->setDataType(DataType::Float32);
     op->setBackend("cpu");
 
@@ -437,7 +397,7 @@ TEST_CASE("[cpu/operator] Mul(forward)", "[Mul][CPU]") {
 
                 op->forwardDims();
                 start = std::chrono::system_clock::now();
-                myMul->forward();
+                op->forward();
                 end = std::chrono::system_clock::now();
                 duration +=
                     std::chrono::duration_cast<std::chrono::microseconds>(
@@ -449,10 +409,8 @@ TEST_CASE("[cpu/operator] Mul(forward)", "[Mul][CPU]") {
                 delete[] array1;
                 delete[] result;
             }
-            std::cout << "number of elements over time spent: "
-                      << (number_of_operation / duration.count()) << std::endl;
-            std::cout << "total time: " << duration.count() << "μs"
-                      << std::endl;
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {}μs\n", duration.count());
         }
 
         SECTION("+1-D Tensor / +1-D Tensor - broadcasting") {
@@ -586,7 +544,7 @@ TEST_CASE("[cpu/operator] Mul(forward)", "[Mul][CPU]") {
                 // compute result
                 op->forwardDims();
                 start = std::chrono::system_clock::now();
-                myMul->forward();
+                op->forward();
                 end = std::chrono::system_clock::now();
                 duration +=
                     std::chrono::duration_cast<std::chrono::microseconds>(
@@ -606,10 +564,8 @@ TEST_CASE("[cpu/operator] Mul(forward)", "[Mul][CPU]") {
                                     std::multiplies<std::size_t>());
                 number_of_operation += nb_elements;
             }
-            std::cout << "number of elements over time spent: "
-                      << (number_of_operation / duration.count()) << std::endl;
-            std::cout << "total time: " << duration.count() << "μs"
-                      << std::endl;
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {}μs\n", duration.count());
         }
         SECTION("+1-D Tensor / 1-D Tensor") {
             std::size_t number_of_operation = 0;
@@ -725,7 +681,7 @@ TEST_CASE("[cpu/operator] Mul(forward)", "[Mul][CPU]") {
                 // compute result
                 op->forwardDims();
                 start = std::chrono::system_clock::now();
-                myMul->forward();
+                op->forward();
                 end = std::chrono::system_clock::now();
                 duration +=
                     std::chrono::duration_cast<std::chrono::microseconds>(
@@ -746,10 +702,8 @@ TEST_CASE("[cpu/operator] Mul(forward)", "[Mul][CPU]") {
                 number_of_operation += nb_elements;
             }
 
-            std::cout << "number of elements over time spent: "
-                      << (number_of_operation / duration.count()) << std::endl;
-            std::cout << "total time: " << duration.count() << "μs"
-                      << std::endl;
+            Log::info("number of elements over time spent: {}\n", (number_of_operation / duration.count()));
+            Log::info("total time: {}μs\n", duration.count());
         }
     }
 }