diff --git a/include/aidge/backend/cpu/operator/ScalingImpl.hpp b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..a9c77f2e5d7bbe1c79cc8695added3b410262688
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
@@ -0,0 +1,70 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_SCALINGIMPL_H_
+#define AIDGE_CPU_OPERATOR_SCALINGIMPL_H_
+
+#include <array>
+#include <memory>
+#include <tuple>
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Scaling.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+
+// Compute kernel registries for forward and backward: concrete kernels are
+// looked up by the (input, output) data-type pair.
+class ScalingImplForward_cpu
+    : public Registrable<ScalingImplForward_cpu, std::tuple<DataType, DataType>, void(const Scaling_Op::Parameters&, std::size_t, const void*, void*)> {
+};
+class ScalingImplBackward_cpu
+    : public Registrable<ScalingImplBackward_cpu, std::tuple<DataType, DataType>, void(const Scaling_Op::Parameters&, std::size_t, const void*, void*)> {
+};
+
+// CPU implementation of the Scaling operator (element-wise multiplication of
+// the input tensor by a constant scaling factor).
+class ScalingImpl_cpu : public OperatorImpl {
+   private:
+    const Scaling_Op& mOp;                    // operator whose tensors are processed
+    std::array<NbElts_t, 1> mNbConsumedData;  // running count of consumed input elements
+    std::array<NbElts_t, 1> mNbProducedData;  // running count of produced output elements
+
+   public:
+    explicit ScalingImpl_cpu(const Scaling_Op& op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
+
+    static std::unique_ptr<ScalingImpl_cpu> create(const Scaling_Op& op) {
+        return std::make_unique<ScalingImpl_cpu>(op);
+    }
+
+   public:
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
+
+    void updateConsummerProducer() override final;
+
+    void forward();
+
+    void backward();
+};
+
+namespace {
+static Registrar<Scaling_Op> registrarScalingImpl_cpu("cpu", Aidge::ScalingImpl_cpu::create);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_SCALINGIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c5b06290ee04ecf9759f418cd26d83e889fcc84e
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_SCALINGIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_SCALINGIMPL_FORWARD_KERNEL_H_
+
+#include <cstddef>  // std::size_t
+#include <tuple>    // std::get
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/ScalingImpl.hpp"
+
+namespace Aidge {
+// Element-wise scaling forward kernel: output[i] = input[i] * scalingFactor.
+template <class I, class O>
+void ScalingImpl_cpu_forward_kernel(const Scaling_Op::Parameters& params,
+                                    std::size_t inputLength,
+                                    const void* input_,
+                                    void* output_) {
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+    // Keep the scaling factor in its native parameter type: casting it to an
+    // integer input type first (e.g. 0.5f -> 0) would zero out the whole output.
+    const auto scalingFactor = std::get<0>(params);
+
+    for (std::size_t i = 0; i < inputLength; ++i) {
+        output[i] = static_cast<O>(input[i] * scalingFactor);
+    }
+}
+
+namespace {
+static Registrar<ScalingImplForward_cpu> registrarScalingImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32}, Aidge::ScalingImpl_cpu_forward_kernel<float, float>);
+static Registrar<ScalingImplForward_cpu> registrarScalingImplForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32}, Aidge::ScalingImpl_cpu_forward_kernel<int, int>);
+static Registrar<ScalingImplForward_cpu> registrarScalingImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64}, Aidge::ScalingImpl_cpu_forward_kernel<double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_SCALINGIMPL_FORWARD_KERNEL_H_ */
diff --git a/src/operator/ScalingImpl.cpp b/src/operator/ScalingImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0770a0ccc1434d03fc26b07c425053cd7c09bee6
--- /dev/null
+++ b/src/operator/ScalingImpl.cpp
@@ -0,0 +1,82 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstdio>      // printf
+#include <functional>  // std::multiplies
+#include <numeric>     // std::accumulate
+#include <vector>
+
+#include "aidge/operator/Scaling.hpp"
+
+#include "aidge/backend/cpu/operator/ScalingImpl.hpp"
+#include "aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp"
+#include "aidge/utils/Types.h"
+
+// FIXME: replace whole Tensor with minimum needed data quantity
+Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*inputIdx*/) const {
+    assert(mOp.getInput(0) && "requires valid input");
+
+    // Requires the whole input tensor: total element count is the product of its dimensions.
+    const auto& inputDims = mOp.getInput(0)->dims();
+    return std::accumulate(inputDims.cbegin(), inputDims.cend(),
+                           static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
+}
+
+Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // Scaling is purely element-wise, so it can run in-place:
+    // no input data needs to be protected from being overwritten.
+    return 0;
+}
+
+Aidge::NbElts_t Aidge::ScalingImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
+    // One output element is produced per input element.
+    const auto& outputDims = mOp.getOutput(0)->dims();
+    return std::accumulate(outputDims.cbegin(), outputDims.cend(),
+                           static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
+}
+
+Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*inputIdx*/) const {
+    return mNbConsumedData[0];
+}
+
+Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
+    return mNbProducedData[0];
+}
+
+void Aidge::ScalingImpl_cpu::updateConsummerProducer(){
+    // Each input is consumed by the minimum amount required for one forward pass;
+    // the output counter advances by the full output size.
+    mNbConsumedData[0] += getNbRequiredData(0);
+    mNbProducedData[0] += getRequiredMemory(0, {});
+}
+
+void Aidge::ScalingImpl_cpu::forward() {
+    assert(mOp.getInput(0) && "missing input #0");
+
+    // Find the kernel registered for the (input, output) data-type pair.
+    auto kernelFunc = Registrar<ScalingImplForward_cpu>::create({
+        mOp.getInput(0)->dataType(),
+        mOp.getOutput(0)->dataType()});
+
+    // Run the element-wise scaling kernel over the whole input tensor.
+    kernelFunc(mOp.getParams(),
+        std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
+        mOp.getInput(0)->getImpl()->rawPtr(),
+        mOp.getOutput(0)->getImpl()->rawPtr());
+
+    // Same counter book-keeping as a scheduler-driven update (avoids duplicating it here).
+    updateConsummerProducer();
+}
+
+void Aidge::ScalingImpl_cpu::backward() {
+    printf("Not implemented yet.\n");
+}