diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp
index 0faca9651d8fa7688693df26aef3d815ab47b0ac..37a781c6c0b0a6cd5ec4553889d739b5d20cac2d 100644
--- a/include/aidge/backend/cpu.hpp
+++ b/include/aidge/backend/cpu.hpp
@@ -31,6 +31,7 @@
 #include "aidge/backend/cpu/operator/FCImpl.hpp"
 #include "aidge/backend/cpu/operator/FoldImpl.hpp"
 #include "aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp"
+#include "aidge/backend/cpu/operator/LRNImpl.hpp"
 #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/LnImpl.hpp"
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
diff --git a/include/aidge/backend/cpu/operator/LRNImpl.hpp b/include/aidge/backend/cpu/operator/LRNImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..81956c8763010d6294bd4a11a943e66fb93a64eb
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/LRNImpl.hpp
@@ -0,0 +1,32 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_LRNIMPL_H_
+#define AIDGE_CPU_OPERATOR_LRNIMPL_H_
+
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
+#include "aidge/operator/LRN.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// Operator implementation entry point for the backend
+using LRNImpl_cpu = OperatorImpl_cpu<LRN_Op,
+    void(float, float, float, std::size_t, const std::vector<DimSize_t>&, const void*, void*)>;  // forward kernel signature: (alpha, beta, bias, size, inputDims, input, output)
+
+// Implementation entry point registration to Operator
+REGISTRAR(LRN_Op, "cpu", Aidge::LRNImpl_cpu::create);
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_LRNIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/LRNImpl_kernels.hpp b/include/aidge/backend/cpu/operator/LRNImpl_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..02018c9f8e002965584df38a95364ca10f69f8b7
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/LRNImpl_kernels.hpp
@@ -0,0 +1,69 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_LRNIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_LRNIMPL_KERNELS_H_
+
+#include "aidge/utils/Registrar.hpp"
+#include <cstddef>
+#include <cmath>
+#include "aidge/data/Data.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+
+#include "aidge/backend/cpu/operator/LRNImpl.hpp"
+
+namespace Aidge {
+// LRN forward: y = x / (bias + (alpha / size) * sum(x^2 over channel window))^beta
+template <class I, class O>
+void LRNImpl_cpu_forward_kernel(float alpha, float beta, float bias, std::size_t size, const std::vector<DimSize_t>& inputDims, const void* input_, void* output_)
+{
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
+    const DimSize_t nbBatch = inputDims[0];
+    const DimSize_t nbChannels = (inputDims.size() > 1) ? inputDims[1] : 1;
+    const DimSize_t featureMapSize = (inputDims.size() > 2) ? std::accumulate(inputDims.begin() + 2, inputDims.end(), DimSize_t(1), std::multiplies<DimSize_t>()) : 1;
+
+    for (std::size_t batch = 0; batch < nbBatch; ++batch) {
+        for (std::size_t ch = 0; ch < nbChannels; ++ch) {
+            const std::size_t ioIndex = (ch + batch*nbChannels) * featureMapSize;
+            // Inclusive channel window [channelMin, channelMax], clamped to valid
+            // channels; guard the subtraction to avoid unsigned underflow.
+            const std::size_t channelMin = (ch >= size / 2) ? ch - size / 2 : 0;
+            const std::size_t channelMax = std::min<std::size_t>(nbChannels - 1, ch + size / 2);
+
+            for (std::size_t feature = 0; feature < featureMapSize; ++feature) {
+                // Sum of squares across the channel window at this spatial position
+                O squareSum(0.0);
+                for (std::size_t accChannel = channelMin; accChannel <= channelMax; ++accChannel)
+                {
+                    const O value = input[(accChannel + batch*nbChannels) * featureMapSize + feature];
+                    squareSum += value * value;
+                }
+
+                // Normalize by the windowed square-sum (ONNX LRN convention:
+                // alpha is scaled by the window size)
+                output[ioIndex + feature] = input[ioIndex + feature]
+                    / std::pow((bias + (alpha / static_cast<float>(size)) * squareSum), beta);
+            }
+        }
+    }
+}
+
+REGISTRAR(LRNImpl_cpu,
+    {DataType::Float32},
+    {ProdConso::defaultModel, Aidge::LRNImpl_cpu_forward_kernel<float, float>, nullptr});  // out-of-place: LRN's window spans neighbouring channels, so output must not alias input
+REGISTRAR(LRNImpl_cpu,
+    {DataType::Float64},
+    {ProdConso::defaultModel, Aidge::LRNImpl_cpu_forward_kernel<double, double>, nullptr});  // no backward kernel registered (nullptr)
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_LRNIMPL_KERNELS_H_ */
diff --git a/src/operator/LRNImpl.cpp b/src/operator/LRNImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b914ffac236e995c58fe2c6a10417c32493b791c
--- /dev/null
+++ b/src/operator/LRNImpl.cpp
@@ -0,0 +1,46 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <chrono>  // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread>  // std::this_thread::sleep_for
+#include <vector>
+
+#include "aidge/operator/LRN.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+
+#include "aidge/backend/cpu/operator/LRNImpl.hpp"
+#include "aidge/backend/cpu/operator/LRNImpl_kernels.hpp"
+
+template <>
+void Aidge::LRNImpl_cpu::forward() {
+    const auto& op_ = dynamic_cast<const LRN_Op&>(mOp);
+    AIDGE_ASSERT(op_.getInput(0), "missing input #0 for LRN operator");
+    AIDGE_ASSERT(!op_.getInput(0)->empty(), "LRN input empty");
+    // Select the kernel registered for the best-matching input/output spec
+    const auto impl = Registrar<LRNImpl_cpu>::create(getBestMatch(getRequiredSpec()));
+
+    // Run it; reuse op_'s typed accessors instead of re-casting mOp
+    impl.forward(op_.alpha(),
+                 op_.beta(),
+                 op_.bias(),
+                 op_.size(),
+                 op_.getInput(0)->dims(),
+                 getCPUPtr(op_.getRawInput(0)),
+                 getCPUPtr(op_.getRawOutput(0)));
+}
+
+template <>
+void Aidge::LRNImpl_cpu::backward() {  // gradient kernel not implemented: unconditionally throws/aborts
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for LRN_Op on backend cpu");
+}
diff --git a/unit_tests/operator/Test_GlobalAveragePoolingImpl.cpp b/unit_tests/operator/Test_GlobalAveragePoolingImpl.cpp
index d5f2065b624de431b43edef9a83bf079905129dd..43af544871ad6c2ac319de09f3c6fce5065e60d5 100644
--- a/unit_tests/operator/Test_GlobalAveragePoolingImpl.cpp
+++ b/unit_tests/operator/Test_GlobalAveragePoolingImpl.cpp
@@ -124,7 +124,9 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
             dims_in[1]; //  averaging per channel : 1 addition per element in
                         //  the channel + 1 division this for every batch
         // create out nb_elems
-        std::vector<std::size_t> dims_out{dims_in[0], dims_in[1]};
+        std::vector<std::size_t> dims_out(dims_in.size(), 1);
+        dims_out[0] = dims_in[0];
+        dims_out[1] = dims_in[1];
         const std::size_t out_nb_elems =
             std::accumulate(dims_out.cbegin(), dims_out.cend(), std::size_t(1),
                             std::multiplies<std::size_t>());
@@ -192,7 +194,9 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
                           //  the channel + 1 division this for every batch
 
           // create out nb_elems
-          std::vector<std::size_t> dims_out{dims_in[0], dims_in[1]};
+          std::vector<std::size_t> dims_out(dims_in.size(), 1);
+          dims_out[0] = dims_in[0];
+          dims_out[1] = dims_in[1];
           const std::size_t out_nb_elems =
               std::accumulate(dims_out.cbegin(), dims_out.cend(),
                               std::size_t(1), std::multiplies<std::size_t>());
@@ -253,7 +257,9 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
         SECTION("2D_img") {
           const std::vector<DimSize_t> in_dims{batch_size, channels, height,
                                                width};
-          const std::vector<DimSize_t> out_dims{batch_size, channels};
+          std::vector<std::size_t> out_dims(in_dims.size(), 1);
+          out_dims[0] = in_dims[0];
+          out_dims[1] = in_dims[1];
           DimSize_t in_nb_elems = batch_size * channels * height * width;
           DimSize_t out_nb_elems = batch_size * channels;
           number_of_operation +=
@@ -368,7 +374,9 @@ TEST_CASE("[cpu/operator] GlobalAveragePooling",
         SECTION("3D_img") {
           const std::vector<DimSize_t> in_dims{batch_size, channels, height,
                                                width, depth};
-          const std::vector<DimSize_t> out_dims{batch_size, channels};
+          std::vector<std::size_t> out_dims(in_dims.size(), 1);
+          out_dims[0] = in_dims[0];
+          out_dims[1] = in_dims[1];
           DimSize_t in_nb_elems =
               batch_size * channels * height * width * depth;
           number_of_operation +=