diff --git a/include/aidge/learning/optimizer/SGD.hpp b/include/aidge/learning/optimizer/SGD.hpp
index 854918a60f255826e156fb443930c8db697afc89..2f77281500b5f92c52146c60652f94530d12574c 100644
--- a/include/aidge/learning/optimizer/SGD.hpp
+++ b/include/aidge/learning/optimizer/SGD.hpp
@@ -52,7 +52,7 @@ public:
         mReversedDampening.set<float>(0, 1.0f - dampening);
     }
 
-    void update() override {
+    void update() override final {
         mLR.setBackend(mParameters[0]->getImpl()->backend());
         mLR.set<float>(0, learningRate());
         if (mParameters[0]->getImpl()->backend() != mMomentum.getImpl()->backend()) {
@@ -74,7 +74,7 @@ public:
         mLRScheduler.update();
     }
 
-    void setParameters(const std::vector<std::shared_ptr<Tensor>>& parameters) {
+    void setParameters(const std::vector<std::shared_ptr<Tensor>>& parameters) override final {
         Optimizer::setParameters(parameters);
         mGradientInertia = std::vector<Tensor>(parameters.size());
         for (std::size_t i = 0; i < parameters.size(); ++i) {
diff --git a/include/aidge/loss/LossList.hpp b/include/aidge/loss/LossList.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e65123dde897610f82ca876f1260a165b785e33f
--- /dev/null
+++ b/include/aidge/loss/LossList.hpp
@@ -0,0 +1,29 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_LOSS_LOSSLIST_H_
+#define AIDGE_CORE_LOSS_LOSSLIST_H_
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+
+namespace Aidge {
+namespace loss {
+
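+/**
+ * @brief Compute the Mean Squared Error loss:
+ * MSE = mean((prediction - target)^2), reduced over every axis.
+ * Both Tensors must share the same backend, dimensions and data type.
+ * @param prediction Tensor produced by the network.
+ * @param target Ground-truth Tensor the prediction is compared against.
+ * @return Tensor holding the scalar loss value.
+ */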
+Tensor MSE(const std::shared_ptr<Tensor>& prediction,
+           const std::shared_ptr<Tensor>& target);
+
+} // namespace loss
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_LOSS_LOSSLIST_H_ */
diff --git a/src/loss/regression/MSE.cpp b/src/loss/regression/MSE.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..227354875c34afcb50008060f2c2a455ea0d458f
--- /dev/null
+++ b/src/loss/regression/MSE.cpp
@@ -0,0 +1,72 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/loss/LossList.hpp"
+
+#include <memory>
+#include <numeric>  // std::iota
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+#include "aidge/operator/Pow.hpp"
+#include "aidge/backend/cpu/operator/PowImpl.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/ReduceMean.hpp"
+#include "aidge/backend/cpu/operator/ReduceMeanImpl.hpp"
+#include "aidge/operator/Sub.hpp"
+#include "aidge/backend/cpu/operator/SubImpl.hpp"
+#include "aidge/scheduler/Scheduler.hpp"
+#include "aidge/utils/ErrorHandling.hpp"  // AIDGE_ASSERT
+
+Aidge::Tensor Aidge::loss::MSE(const std::shared_ptr<Tensor>& prediction,
+                               const std::shared_ptr<Tensor>& target) {
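+    // Sanity checks: both Tensors must live on the same backend and share
+    // the same shape and data type.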
+    AIDGE_ASSERT(prediction->backend() == target->backend(),
+        "'prediction' and 'target' Tensors must be on the same backend. Found {} and {}.\n",
+        prediction->backend(),
+        target->backend());
+    AIDGE_ASSERT(prediction->dims() == target->dims(),
+        "'prediction' (shape {}) and 'target' (shape {}) Tensors must have the same dimensions.\n",
+        prediction->dims(),
+        target->dims());
+    AIDGE_ASSERT(prediction->dataType() == target->dataType(),
+        "'prediction' ({}) and 'target' ({}) Tensors must have the same data type.\n",
+        prediction->dataType(),
+        target->dataType());
+
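+    // Build a temporary graph that computes mean((prediction - target)^2)
+    // with three operators: Sub -> Pow -> ReduceMean.
+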
+    // Reduce over every axis so that the loss is returned as a scalar Tensor.
+    // Note: the axes vector could be built at compile time with constexpr constructors.
+    std::vector<int> axes_dims(prediction->nbDims());
+    std::iota(std::begin(axes_dims), std::end(axes_dims), 0);
+    auto rm_node = ReduceMean(axes_dims, 1, "mse_res");
+
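+    // Square the difference: Pow takes the Sub output on input #0 and a
+    // constant exponent (2), provided by a Producer node, on input #1.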
+    const std::shared_ptr<Node> pow_node = Pow();
+    const std::shared_ptr<Node> pow_exp_node = Producer(std::make_shared<Tensor>(Array1D<int,1>{{2}}));
+    pow_exp_node->addChild(pow_node, 0, 1);
+
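+    // Compute (prediction - target): both Tensors are wrapped in Producer
+    // nodes connected to the two inputs of the Sub operator.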
+    const std::shared_ptr<Node> sub_node = Sub();
+    Producer(prediction)->addChild(sub_node, 0, 0);
+    Producer(target)->addChild(sub_node, 0, 1);
+
+    std::shared_ptr<GraphView> gv_local = Sequential({
+        sub_node,
+        pow_node,
+        rm_node
+    });
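+    // Sequential() only captures the main branch; the Producer nodes feeding
+    // sub_node and pow_node must be added to the GraphView explicitly.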
+    gv_local->add({sub_node->getParent(0), sub_node->getParent(1), pow_exp_node});
+    gv_local->compile(prediction->getImpl()->backend(), prediction->dataType());
+
+    // Run a single forward pass on the local graph.
+    SequentialScheduler ss_local{gv_local};
+    ss_local.forward(false, true);
+
+    // Retrieve the result from the ReduceMean node.
+    // TODO: accessing a graph output should be simpler than this.
+    const std::shared_ptr<OperatorTensor> res = std::dynamic_pointer_cast<OperatorTensor>(rm_node->getOperator());
+    return res->getOutput(0)->clone();
+}
diff --git a/unit_tests/loss/regression/Test_MSE.cpp b/unit_tests/loss/regression/Test_MSE.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3899470b5f0141fc747f6a2a52cc35b41a590d49
--- /dev/null
+++ b/unit_tests/loss/regression/Test_MSE.cpp
@@ -0,0 +1,88 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>     // std::size_t
+#include <cmath>       // std::pow
+#include <functional>  // std::multiplies, std::plus
+#include <memory>      // std::make_unique
+#include <numeric>     // std::accumulate
+#include <random>      // std::random_device, std::mt19937,
+                       // std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/loss/LossList.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/TensorUtils.hpp"
+
+namespace Aidge {
+TEST_CASE("[loss/regression] MSE", "[loss][regression][MSE]") {
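+    // Strategy: draw random shapes and values, compute the MSE by hand on the
+    // raw arrays, then check that loss::MSE returns the same scalar.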
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // set random variables
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 5);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 2);
+    std::uniform_real_distribution<float> valueDist(0.0f, 1.0f);
+
+    for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+        // generate random Tensor dimensions
+        const std::size_t nb_dims = nbDimsDist(gen);
+        std::vector<std::size_t> dims(nb_dims);
+
+        for (std::size_t i = 0; i < nb_dims; ++i) { dims[i] = dimsDist(gen); }
+        const std::size_t nb_elements = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
+
+        // create random predictions
+        std::unique_ptr<float[]> pred = std::make_unique<float[]>(nb_elements);
+        for (std::size_t i = 0; i < nb_elements; ++i) {
+            pred[i] = valueDist(gen);
+        }
+
+        // create random targets
+        std::unique_ptr<float[]> targ = std::make_unique<float[]>(nb_elements);
+        for (std::size_t i = 0; i < nb_elements; ++i) {
+            targ[i] = valueDist(gen);
+        }
+
+        // compute the MSE manually: mean of the squared element-wise differences
+        std::unique_ptr<float[]> tmp_res_manual = std::make_unique<float[]>(nb_elements);
+        for (std::size_t i = 0; i < nb_elements; ++i) {
+            tmp_res_manual[i] = std::pow(pred[i] - targ[i], 2);
+        }
+        const float res_manual = std::accumulate(&tmp_res_manual[0], &tmp_res_manual[nb_elements], 0.0f, std::plus<float>()) / static_cast<float>(nb_elements);
+
+        // compute the MSE with the Aidge::loss::MSE function
+        std::shared_ptr<Tensor> pred_tensor = std::make_shared<Tensor>(dims);
+        pred_tensor->setBackend("cpu");
+        pred_tensor->getImpl()->setRawPtr(pred.get(), nb_elements);
+
+        std::shared_ptr<Tensor> targ_tensor = std::make_shared<Tensor>(dims);
+        targ_tensor->setBackend("cpu");
+        targ_tensor->getImpl()->setRawPtr(targ.get(), nb_elements);
+
+        const Tensor res_function = loss::MSE(pred_tensor, targ_tensor);
+
+        // compare the two results
+        const Tensor res_manual_tensor = Tensor(res_manual);
+        REQUIRE(approxEq<float>(res_manual_tensor, res_function));
+    }
+}
+}  // namespace Aidge