diff --git a/unit_tests/metrics/Test_Accuracy.cpp b/unit_tests/metrics/Test_Accuracy.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8d849feede215fbadfc684c8da68c1612879b995
--- /dev/null
+++ b/unit_tests/metrics/Test_Accuracy.cpp
@@ -0,0 +1,177 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <algorithm>   // std::max_element
+#include <cstddef>     // std::size_t
+#include <cstdint>     // std::int32_t, std::uint16_t
+#include <functional>  // std::multiplies
+#include <iterator>    // std::distance
+#include <memory>      // std::make_unique
+#include <numeric>     // std::accumulate
+#include <random>      // std::random_device, std::mt19937,
+                       // std::uniform_int_distribution, std::uniform_real_distribution
+#include <vector>
+#include "aidge/backend/cpu/operator/ArgMaxImpl.hpp"
+#include "aidge/backend/cpu/operator/AndImpl.hpp"
+#include "aidge/backend/cpu/operator/ReduceSumImpl.hpp"
+#include "aidge/learning/metrics/Accuracy.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+
+#if USE_AIDGE_BACKEND_CUDA
+#include "aidge/backend/cuda/operator/ArgMaxImpl.hpp"
+#include "aidge/backend/cuda/operator/AndImpl.hpp"
+#include "aidge/backend/cuda/operator/ReduceSumImpl.hpp"
+#endif
+
+namespace Aidge {
+TEST_CASE("[metrics] Accuracy", "[metrics][Accuracy]") {
+
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Random generators for tensor dimensions and tensor values.
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 5);
+    std::uniform_real_distribution<float> valueDist(0.0f, 1.0f);
+
+    SECTION("CPU") {
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = 2; // For Accuracy, tensors are always 2-D: {NbBatch, NbClass}
+            std::vector<std::size_t> dims(2);
+            const std::int32_t classAxis = 1;
+
+            for (std::size_t i = 0; i < nb_dims; ++i) { dims[i] = dimsDist(gen); }
+
+            const std::size_t numBatches = dims[0];
+            const std::size_t numClasses = dims[1];
+            const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+
+            // create random predictions
+            std::unique_ptr<float[]> pred = std::make_unique<float[]>(nb_elements);
+            for (std::size_t i = 0; i < nb_elements; ++i) {
+                pred[i] = valueDist(gen);
+            }
+
+            // create random targets
+            std::unique_ptr<float[]> targ = std::make_unique<float[]>(nb_elements);
+            for (std::size_t i = 0; i < nb_elements; ++i) {
+                targ[i] = valueDist(gen);
+            }
+
+            // Reference result: count the batches whose argmax matches.
+            int correct_predictions = 0;
+            for (std::size_t batch = 0; batch < numBatches; ++batch) {
+                // Find the index of the maximum value in the current batch (for both pred and targ)
+                auto pred_start = pred.get() + batch * numClasses;
+                auto targ_start = targ.get() + batch * numClasses;
+
+                std::size_t pred_max_idx = std::distance(pred_start, std::max_element(pred_start, pred_start + numClasses));
+                std::size_t targ_max_idx = std::distance(targ_start, std::max_element(targ_start, targ_start + numClasses));
+
+                // If the indices match, it's a correct prediction
+                if (pred_max_idx == targ_max_idx) {
+                    correct_predictions++;
+                }
+            }
+
+            // The expected metric value is the raw number of correct predictions.
+            float accuracy = static_cast<float>(correct_predictions);
+
+            // compute the Accuracy using Aidge::metrics::Accuracy function
+            std::shared_ptr<Tensor> pred_tensor = std::make_shared<Tensor>(dims);
+            pred_tensor->setBackend("cpu");
+            pred_tensor->getImpl()->setRawPtr(pred.get(), nb_elements);
+
+            std::shared_ptr<Tensor> targ_tensor = std::make_shared<Tensor>(dims);
+            targ_tensor->setBackend("cpu");
+            targ_tensor->getImpl()->setRawPtr(targ.get(), nb_elements);
+
+            const Tensor res_function = metrics::Accuracy(pred_tensor, targ_tensor, classAxis);
+
+            // compare results
+            Tensor res_manual_tensor = Tensor(accuracy);
+            REQUIRE(approxEq<float>(res_manual_tensor, res_function));
+        }
+    }
+#if USE_AIDGE_BACKEND_CUDA
+    SECTION("CUDA") {
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = 2; // For Accuracy, tensors are always 2-D: {NbBatch, NbClass}
+            std::vector<std::size_t> dims(2);
+            const std::int32_t classAxis = 1;
+
+            for (std::size_t i = 0; i < nb_dims; ++i) { dims[i] = dimsDist(gen); }
+
+            const std::size_t numBatches = dims[0];
+            const std::size_t numClasses = dims[1];
+            const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+
+            // create random predictions
+            std::unique_ptr<float[]> pred = std::make_unique<float[]>(nb_elements);
+            for (std::size_t i = 0; i < nb_elements; ++i) {
+                pred[i] = valueDist(gen);
+            }
+            float * d_pred;
+            cudaMalloc(&d_pred, nb_elements * sizeof(float));
+            cudaMemcpy(d_pred, pred.get(), nb_elements * sizeof(float), cudaMemcpyHostToDevice);
+
+            // create random targets
+            std::unique_ptr<float[]> targ = std::make_unique<float[]>(nb_elements);
+            for (std::size_t i = 0; i < nb_elements; ++i) {
+                targ[i] = valueDist(gen);
+            }
+            float * d_targ;
+            cudaMalloc(&d_targ, nb_elements * sizeof(float));
+            cudaMemcpy(d_targ, targ.get(), nb_elements * sizeof(float), cudaMemcpyHostToDevice);
+
+            // Reference result: count the batches whose argmax matches (computed on the host copies).
+            int correct_predictions = 0;
+            for (std::size_t batch = 0; batch < numBatches; ++batch) {
+                // Find the index of the maximum value in the current batch (for both pred and targ)
+                auto pred_start = pred.get() + batch * numClasses;
+                auto targ_start = targ.get() + batch * numClasses;
+
+                std::size_t pred_max_idx = std::distance(pred_start, std::max_element(pred_start, pred_start + numClasses));
+                std::size_t targ_max_idx = std::distance(targ_start, std::max_element(targ_start, targ_start + numClasses));
+
+                // If the indices match, it's a correct prediction
+                if (pred_max_idx == targ_max_idx) {
+                    correct_predictions++;
+                }
+            }
+
+            // The expected metric value is the raw number of correct predictions.
+            float accuracy = static_cast<float>(correct_predictions);
+
+            // compute the Accuracy using Aidge::metrics::Accuracy function
+            std::shared_ptr<Tensor> pred_tensor = std::make_shared<Tensor>(dims);
+            pred_tensor->setBackend("cuda");
+            pred_tensor->getImpl()->setRawPtr(d_pred, nb_elements);
+
+            std::shared_ptr<Tensor> targ_tensor = std::make_shared<Tensor>(dims);
+            targ_tensor->setBackend("cuda");
+            targ_tensor->getImpl()->setRawPtr(d_targ, nb_elements);
+
+            const Tensor res_function = metrics::Accuracy(pred_tensor, targ_tensor, classAxis);
+
+            // compare results
+            Tensor res_manual_tensor = Tensor(accuracy);
+            REQUIRE(approxEq<float>(res_manual_tensor, res_function));
+
+            cudaFree(d_pred);
+            cudaFree(d_targ);
+        }
+    }
+#endif
+}
+}  // namespace Aidge