diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp
index 95b2f7b8e2ff70c9b9224bea1137ad74e469ffb8..59dc3cecb684d49b3c6958c103cf1fb0a72ac636 100644
--- a/include/aidge/backend/cpu.hpp
+++ b/include/aidge/backend/cpu.hpp
@@ -23,5 +23,6 @@
 #include "aidge/backend/cpu/operator/ProducerImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
+#include "aidge/backend/cpu/operator/ScalingImpl.hpp"
 
 #endif /* AIDGE_CPU_IMPORTS_H_ */
\ No newline at end of file
diff --git a/include/aidge/backend/cpu/operator/ScalingImpl.hpp b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6e75b6f42d565a481021bdbba17ee0e637f4707e
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
@@ -0,0 +1,66 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_SCALINGIMPL_H_
+#define AIDGE_CPU_OPERATOR_SCALINGIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Scaling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// class Scaling_Op;
+
+// compute kernel registry for forward and backward
+class ScalingImplForward_cpu
+    : public Registrable<ScalingImplForward_cpu, std::tuple<DataType, DataType>, void(const Scaling_Op::Parameters&, std::size_t, const void*, void*)> {
+};
+class ScalingImplBackward_cpu
+    : public Registrable<ScalingImplBackward_cpu, std::tuple<DataType, DataType>, void(const Scaling_Op::Parameters&, std::size_t, const void*, void*)> {
+};
+
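+// CPU implementation of the Scaling operator: looks up the kernel registered for the
+// input/output data types and runs it in forward().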
+class ScalingImpl_cpu : public OperatorImpl {
+   private:
+    const Scaling_Op& mOp;
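+    // Running counts of elements consumed from input #0 and produced on output #0,
+    // updated by updateConsummerProducer() and forward().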
+    std::array<NbElts_t, 1> mNbConsumedData;
+    std::array<NbElts_t, 1> mNbProducedData;
+
+   public:
+    ScalingImpl_cpu(const Scaling_Op& op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
+
+    static std::unique_ptr<ScalingImpl_cpu> create(const Scaling_Op& op) {
+        return std::make_unique<ScalingImpl_cpu>(op);
+    }
+
+   public:
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+
+    void updateConsummerProducer() override final;
+
+    void forward();
+
+    void backward();
+};
+
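+// Registers this implementation under the "cpu" key for Scaling_Op. Intended flow
+// (a sketch, assuming the usual Aidge backend-selection mechanism):
+//   1. the core library creates a Scaling_Op and selects the "cpu" backend,
+//   2. the registrar below is queried and ScalingImpl_cpu::create(op) is invoked,
+//   3. the scheduler then drives forward() through the returned implementation.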
+namespace {
+static Registrar<Scaling_Op> registrarScalingImpl_cpu("cpu", Aidge::ScalingImpl_cpu::create);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_SCALINGIMPL_H_ */
\ No newline at end of file
diff --git a/include/aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c5b06290ee04ecf9759f418cd26d83e889fcc84e
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp
@@ -0,0 +1,45 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_SCALINGIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_SCALINGIMPL_FORWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/ScalingImpl.hpp"
+
+namespace Aidge {
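+/**
+ * @brief CPU forward kernel for the Scaling operator: multiplies every element of the
+ *        input by the scaling factor held as the operator's first parameter.
+ * @tparam I Input data type.
+ * @tparam O Output data type.
+ */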
+template <class I, class O>
+void ScalingImpl_cpu_forward_kernel(const Scaling_Op::Parameters& params,
+                                    std::size_t inputLength,
+                                    const void* input_,
+                                    void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+    I scalingFactor = static_cast<I>(std::get<0>(params));
+
+    for (std::size_t i = 0; i < inputLength; ++i) {
+        output[i] = input[i] * scalingFactor;
+    }
+}
+
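+// Kernel registration for each supported pair of (input, output) data types.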
+namespace {
+static Registrar<ScalingImplForward_cpu> registrarScalingImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::ScalingImpl_cpu_forward_kernel<float, float>);
+static Registrar<ScalingImplForward_cpu> registrarScalingImplForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::ScalingImpl_cpu_forward_kernel<int, int>);
+static Registrar<ScalingImplForward_cpu> registrarScalingImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::ScalingImpl_cpu_forward_kernel<double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* __AIDGE_CPU_OPERATOR_ScalingIMPL_FORWARD_KERNEL_H__ */
diff --git a/src/operator/ScalingImpl.cpp b/src/operator/ScalingImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c6a96f3bc8ea865da1c31ddfadff67c1e8556ad5
--- /dev/null
+++ b/src/operator/ScalingImpl.cpp
@@ -0,0 +1,84 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <functional> // std::multiplies
+#include <numeric>    // std::accumulate
+#include <vector>
+
+#include "aidge/backend/cpu/operator/ScalingImpl.hpp"
+#include "aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp"
+#include "aidge/operator/Scaling.hpp"
+#include "aidge/utils/Types.h"
+
+// FIXME: replace whole Tensor with minimum needed data quantity
+Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*inputIdx*/) const {
+    assert(mOp.getInput(0) && "requires valid input");
+
+    // Requires the whole tensors
+    const auto& inputDims = mOp.getInput(0)->dims();
+
+    return std::accumulate(inputDims.begin(), inputDims.end(),
+                        static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
+}
+
+Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // Scaling is an element-wise operation, so it can be computed in-place:
+    // no protected (non-overwritable) input memory is required.
+    return 0;
+}
+
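+// The whole output tensor is written in a single forward pass, so the required memory
+// is its total number of elements.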
+Aidge::NbElts_t Aidge::ScalingImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t> &inputsSize) const {
+    (void) outputIdx;
+    (void) inputsSize;
+    const auto& outputDims = mOp.getOutput(0)->dims();
+    return std::accumulate(outputDims.begin(), outputDims.end(),
+                        static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
+}
+
+Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*inputIdx*/) const {
+    return mNbConsumedData[0];
+}
+
+Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
+    return mNbProducedData[0];
+}
+
+void Aidge::ScalingImpl_cpu::updateConsummerProducer() {
+    // Each input is consumed by the minimum amount required for a forward pass.
+    mNbConsumedData[0] += getNbRequiredData(0);
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
+
+void Aidge::ScalingImpl_cpu::forward() {
+    assert(mOp.getInput(0) && "missing input #0");
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<ScalingImplForward_cpu>::create({
+        mOp.getInput(0)->dataType(),
+        mOp.getOutput(0)->dataType()});
+
+    // Call kernel
+    kernelFunc(mOp.getParams(),
+        std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
+        mOp.getInput(0)->getImpl()->rawPtr(),
+        mOp.getOutput(0)->getImpl()->rawPtr());
+
+    // Each input is consumed by the minimum amount required for a forward pass.
+    mNbConsumedData[0] += getNbRequiredData(0);
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
+
+void Aidge::ScalingImpl_cpu::backward() {
+    printf("Not implemented yet.\n");
+}