diff --git a/unit_tests/Test_AddImpl.cpp b/unit_tests/Test_AddImpl.cpp
index 63829c138cdc340ce2408f5123c5a1b5cd544859..e1e6fb869f0d6b74c905f7c199524b1dca58315b 100644
--- a/unit_tests/Test_AddImpl.cpp
+++ b/unit_tests/Test_AddImpl.cpp
@@ -45,7 +45,6 @@ TEST_CASE("[gpu/operator] Add(forward)", "[Add][GPU]") {
         op->associateInput(0, input1);
         op->setBackend("cuda");
         op->setDataType(DataType::Float32);
-        op->computeOutputDims();
         myAdd->forward();
 
         float* computedOutput   = new float[input1->size()]();
@@ -88,7 +87,6 @@ TEST_CASE("[gpu/operator] Add(forward)", "[Add][GPU]") {
         op->associateInput(1, input1);
         op->setBackend("cuda");
         op->setDataType(DataType::Float32);
-        op->computeOutputDims();
         myAdd->forward();
 
         float* computedOutput   = new float[input1->size()]();
@@ -130,7 +128,6 @@ TEST_CASE("[gpu/operator] Add(forward)", "[Add][GPU]") {
         op->associateInput(2, input1);
         op->setDataType(DataType::Float32);
         op->setBackend("cuda");
-        op->computeOutputDims();
         myAdd->forward();
 
         float* computedOutput   = new float[input1->size()]();
@@ -198,7 +195,6 @@ TEST_CASE("[gpu/operator] Add(forward)", "[Add][GPU]") {
         op->associateInput(2, input_2);
         op->setDataType(DataType::Float32);
         op->setBackend("cuda");
-        op->computeOutputDims();
         myAdd->forward();
 
         float* computedOutput   = new float[input1->size()]();
diff --git a/unit_tests/Test_BatchNormImpl.cpp b/unit_tests/Test_BatchNormImpl.cpp
index a601b5ab90d423ea9a473498940a8443f00bf98b..c4051d531a7ae0c2c9c7d6506f1da229fe8745e6 100644
--- a/unit_tests/Test_BatchNormImpl.cpp
+++ b/unit_tests/Test_BatchNormImpl.cpp
@@ -104,7 +104,6 @@ TEST_CASE("[gpu/operator] BatchNorm(forward)") {
     op->associateInput(2,myBias);
     op->associateInput(3,myMean);
     op->associateInput(4,myVar);
-    op->computeOutputDims();
     op->forward();
 
     float* computedOutput   = new float[myOutput->size()]();
diff --git a/unit_tests/Test_ConvDepthWiseImpl.cpp b/unit_tests/Test_ConvDepthWiseImpl.cpp
index 1fad201a2c775a8d802010def44ff44c1d5d4a3f..ed953ec2cb15523618065b0e5fd448e3183c2806 100644
--- a/unit_tests/Test_ConvDepthWiseImpl.cpp
+++ b/unit_tests/Test_ConvDepthWiseImpl.cpp
@@ -152,7 +152,6 @@ TEST_CASE("[cpu/operator] ConvDepthWise(forward)", "[ConvDepthWise][CPU]") {
     op -> associateInput(2, myBias);
     op->setDataType(DataType::Float32);
     op->setBackend("cuda");
-    op -> computeOutputDims();
     myCDW -> forward();
 
     float* computedOutput   = new float[myOutput->size()]();
diff --git a/unit_tests/Test_GlobalAveragePoolingImpl.cpp b/unit_tests/Test_GlobalAveragePoolingImpl.cpp
index 5b7311cec30b542682f6a910e828bdbf1938b8b9..bdde2354ae38f3265d1f24fe4b494d2806190688 100644
--- a/unit_tests/Test_GlobalAveragePoolingImpl.cpp
+++ b/unit_tests/Test_GlobalAveragePoolingImpl.cpp
@@ -66,7 +66,6 @@ TEST_CASE("[gpu/operator] GlobalAveragePooling",
       op->setBackend("cuda");
 
       op->associateInput(0, myInput);
-      op->computeOutputDims();
       globAvgPool->forward();
       float* computedOutput   = new float[myOutput->size()]();
       cudaMemcpy(computedOutput, op->getOutput(0)->getImpl()->rawPtr(), sizeof(float) * myOutput->size(), cudaMemcpyDeviceToHost);
diff --git a/unit_tests/Test_PadImpl.cpp b/unit_tests/Test_PadImpl.cpp
index 1c9e8b13dab0fb6be9a5fb2d99461cd7fcb91b4b..85741feaf562fefe0d91b293adecaa23bff0d9de 100644
--- a/unit_tests/Test_PadImpl.cpp
+++ b/unit_tests/Test_PadImpl.cpp
@@ -131,7 +131,6 @@ TEST_CASE("[gpu/operator] Pad(forward)", "[Pad][GPU]") {
         myPad->getOperator()->associateInput(0,myInput);
         myPad->getOperator()->setDataType(DataType::Float32);
         myPad->getOperator()->setBackend("cuda");
-        op->computeOutputDims();
         myPad->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
@@ -245,7 +244,6 @@ TEST_CASE("[gpu/operator] Pad(forward)", "[Pad][GPU]") {
         myPad->getOperator()->associateInput(0,myInput);
         myPad->getOperator()->setDataType(DataType::Float32);
         myPad->getOperator()->setBackend("cuda");
-        op->computeOutputDims();
         myPad->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
@@ -362,7 +360,6 @@ TEST_CASE("[gpu/operator] Pad(forward)", "[Pad][GPU]") {
         myPad->getOperator()->associateInput(0,myInput);
         myPad->getOperator()->setDataType(DataType::Float32);
         myPad->getOperator()->setBackend("cuda");
-        op->computeOutputDims();
         myPad->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
@@ -491,7 +488,6 @@ TEST_CASE("[gpu/operator] Pad(forward)", "[Pad][GPU]") {
         myPad->getOperator()->associateInput(0,myInput);
         myPad->getOperator()->setDataType(DataType::Float32);
         myPad->getOperator()->setBackend("cuda");
-        op->computeOutputDims();
         myPad->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
@@ -608,7 +604,6 @@ TEST_CASE("[gpu/operator] Pad(forward)", "[Pad][GPU]") {
         myPad->getOperator()->associateInput(0,myInput);
         myPad->getOperator()->setDataType(DataType::Float32);
         myPad->getOperator()->setBackend("cuda");
-        op->computeOutputDims();
         myPad->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
diff --git a/unit_tests/Test_ReshapeImpl.cpp b/unit_tests/Test_ReshapeImpl.cpp
index ae055b8609b38313be5e5ae879f4f64a86765392..35d14892245262d021323c58602d87f8fe23f8c0 100644
--- a/unit_tests/Test_ReshapeImpl.cpp
+++ b/unit_tests/Test_ReshapeImpl.cpp
@@ -40,7 +40,6 @@ TEST_CASE("[gpu/operator] Reshape(forward)") {
         op->associateInput(0, input);
         op->setDataType(DataType::Float32);
         op->setBackend("cuda");
-        op->computeOutputDims();
         myReshape->forward();
 
         float* computedOutput   = new float[myOutput->size()]();
@@ -74,7 +73,6 @@ TEST_CASE("[gpu/operator] Reshape(forward)") {
         op->associateInput(0, input);
         op->setDataType(DataType::Float32);
         op->setBackend("cuda");
-        op->computeOutputDims();
         myReshape->forward();
 
         float* computedOutput   = new float[myOutput->size()]();