diff --git a/unit_tests/Test_AvgPoolingImpl.cpp b/unit_tests/Test_AvgPoolingImpl.cpp
index 3024239bcf971c92fd1b644437304501bfeac2a0..5451917d74e99a1a7d6207c57c9f24d37487b819 100644
--- a/unit_tests/Test_AvgPoolingImpl.cpp
+++ b/unit_tests/Test_AvgPoolingImpl.cpp
@@ -13,6 +13,8 @@
 
 #include <catch2/catch_test_macros.hpp>
 #include <cuda_fp16.h>
+#include <chrono>    // std::chrono::time_point, std::chrono::system_clock, std::chrono::duration
+#include <cmath>     // std::fabs
+#include <cstdint>   // std::uint16_t
+#include <iostream>  // std::cout
+#include <numeric>   // std::accumulate
+#include <random>    // std::random_device, std::mt19937, std::uniform_real_distribution
 
 #include "Test_cuda.hpp"
 
@@ -56,7 +58,7 @@ TEST_CASE("[gpu/operator] AvgPooling(forward)", "[AvgPooling][GPU]") {
         }
     });
     SECTION("Stride") {
-        std::shared_ptr<Node> myAvgPool = AvgPooling({2,2}, "mycdw", {2,2});
+        std::shared_ptr<Node> myAvgPool = AvgPooling({2,2}, "myAvgPool", {2,2});
         auto op = std::static_pointer_cast<OperatorTensor>(myAvgPool -> getOperator());
 
         std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<float,2,2,2,2> {
@@ -102,7 +104,7 @@ TEST_CASE("[gpu/operator] AvgPooling(forward)", "[AvgPooling][GPU]") {
             }
         }
         });
-        std::shared_ptr<Node> myAvgPool = AvgPooling({3,3}, "mycdw", {3,3});
+        std::shared_ptr<Node> myAvgPool = AvgPooling({3,3}, "myAvgPool", {3,3});
         auto op = std::static_pointer_cast<OperatorTensor>(myAvgPool -> getOperator());
 
         std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<float,1,1,1,1> {
@@ -137,7 +139,7 @@ TEST_CASE("[gpu/operator] AvgPooling(forward)", "[AvgPooling][GPU]") {
         });
         myInput2->setBackend("cuda");
 
-        std::shared_ptr<Node> myAvgPool = AvgPooling({3,3}, "mycdw", {3,3});
+        std::shared_ptr<Node> myAvgPool = AvgPooling({3,3}, "mymyAvgPoolcdw", {3,3});
         auto op = std::static_pointer_cast<OperatorTensor>(myAvgPool -> getOperator());
         std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<half_float::half,1,1,1,1> {
             {{{{(half_float::half(0.3745) + half_float::half(0.9507) + half_float::half(0.7320) + half_float::half(0.5987) + half_float::half(0.1560) + half_float::half(0.1560) + half_float::half(0.0581) + half_float::half(0.8662) + half_float::half(0.6011))/half_float::half(9.0)}}}}
@@ -158,4 +160,149 @@ TEST_CASE("[gpu/operator] AvgPooling(forward)", "[AvgPooling][GPU]") {
 
         delete[] computedOutput;
     }
+
+    SECTION("Random Input") {
+        constexpr std::uint16_t NBTRIALS = 10;
+        std::size_t kernel = 2;
+        std::size_t stride = 2;
+        // Create a random number generator
+        std::random_device rd;
+        std::mt19937 gen(rd());
+        std::uniform_real_distribution<float> valueDist(0.1f, 1.1f); // random floats in [0.1, 1.1)
+        std::uniform_int_distribution<std::size_t> dimSizeDist(std::size_t(2),
+                                                                std::size_t(10));
+
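+        // always draw 4 dimensions: AvgPooling expects 4-D (N, C, H, W) inputs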
+        std::uniform_int_distribution<std::size_t> nbDimsDist(std::size_t(4), std::size_t(4));
+
+        // Create AveragePooling Operator
+        std::shared_ptr<Node> myAvgPool = AvgPooling({kernel,kernel}, "myAvgPool", {stride,stride});
+        auto op = std::static_pointer_cast<OperatorTensor>(myAvgPool -> getOperator());
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+
+        // Create the input Tensor
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        op->associateInput(0, T0);
+        T0->setDataType(DataType::Float32);
+        T0->setBackend("cpu");
+
+        // To measure execution time of 'AvgPooling_Op::forward()'
+        std::chrono::time_point<std::chrono::system_clock> start;
+        std::chrono::time_point<std::chrono::system_clock> end;
+        std::chrono::duration<double, std::micro> duration{};
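+        // total number of processed input elements, used for the throughput report below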
+        std::size_t number_of_operation = 0;
+
+        SECTION("OutDims") {
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate a random Tensor
+                const std::size_t nbDims = nbDimsDist(gen);
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(dimSizeDist(gen));
+                }
+                
+                const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
+
+                // Fill input tensor
+                float* array0 = new float[nb_elements];
+                for (std::size_t i = 0; i < nb_elements; ++i) {
+                    array0[i] = valueDist(gen);
+                }
+                T0->resize(dims);
+                T0 -> getImpl() -> setRawPtr(array0, nb_elements);
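+                // array0 is handed to the tensor implementation as its raw buffer and freed manually at the end of the trial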
+
+                // Run inference
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myAvgPool->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                // Verify output dimensions
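+                // spatial dims (H, W): out = floor((in - kernel) / stride) + 1; batch and channel dims are unchanged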
+                REQUIRE(op->getOutput(0)->nbDims() == dims.size());
+                for (size_t i = 0; i < op->getOutput(0)->nbDims(); ++i) {
+                    if(i == 2 || i == 3)
+                        REQUIRE(op->getOutput(0)->dims()[i] == (1 + static_cast<DimSize_t>(std::floor(static_cast<float>(dims[i] - kernel) / static_cast<float>(stride)))));
+                    else
+                        REQUIRE(op->getOutput(0)->dims()[i] == dims[i]);
+                }
+        
+                delete[] array0;
+            }
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
+        }
+
+        SECTION("Values") {
+            for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+                // generate a Tensor with fixed 4x4x4x4 dims and random values
+                const std::size_t nbDims = nbDimsDist(gen);
+                std::vector<std::size_t> dims;
+                for (std::size_t i = 0; i < nbDims; ++i) {
+                    dims.push_back(4);
+                }
+                
+                const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+                number_of_operation += nb_elements;
+
+                // Fill input tensor
+                float* array0 = new float[nb_elements];
+                for (std::size_t i = 0; i < nb_elements; ++i) {
+                    array0[i] = valueDist(gen);
+                }
+                T0->resize(dims);
+                T0 -> getImpl() -> setRawPtr(array0, nb_elements);
+
+                // Fill expected output
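+                // reference: for each of the N*C feature maps, slide a kernel x kernel window with the given stride and average its elements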
+                std::vector<float> result;
+                std::size_t rows = dims[2], cols = dims[3], nbMat = dims[0] * dims[1], matSize = rows*cols;
+                for (std::size_t i = 0; i < nbMat; ++i) {
+                    for (std::size_t r = 0; r + kernel <= rows; r += stride) {
+                        for (std::size_t c = 0; c + kernel <= cols; c += stride) {
+                            float sum = 0.0f;
+                            for (std::size_t m = 0; m < kernel; ++m) {
+                                for (std::size_t n = 0; n < kernel; ++n) {
+                                    sum += array0[i * matSize + (r + m) * cols + c + n];
+                                }
+                            }
+                            result.push_back(sum / (kernel * kernel));
+                        }
+                    }
+                }
+
+                // Run inference
+                op->computeOutputDims();
+                start = std::chrono::system_clock::now();
+                myAvgPool->forward();
+                end = std::chrono::system_clock::now();
+                duration += std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+
+                std::cout << "---------output" << std::endl;
+                op->getOutput(0)->print();
+                float* computedOutput = static_cast<float*>(op->getOutput(0)->getImpl()->rawPtr());
+                for (size_t i = 0; i < op->getOutput(0)->size(); i++)
+                {
+                    std::cout << "i " << i << " computed: "<< computedOutput[i] << ", expected " << result[i] << std::endl;
+                    // REQUIRE(approxEq<float>(computedOutput[i], result[i]));
+                    REQUIRE(abs(computedOutput[i] - result[i]) < 1e-6);
+                }
+                
+                delete[] array0;
+            }
+            std::cout << "number of elements over time spent: " << (number_of_operation / duration.count())<< std::endl;
+            std::cout << "total time: " << duration.count() << "μs" << std::endl;
+        }
+    }
 }
\ No newline at end of file