diff --git a/include/aidge/backend/cpu.hpp b/include/aidge/backend/cpu.hpp
index 84d639f83a471ffb764813184abd3dccc90b1213..5a7ac3958b76e94c8389b0287fdac40c8c3a5ad8 100644
--- a/include/aidge/backend/cpu.hpp
+++ b/include/aidge/backend/cpu.hpp
@@ -19,14 +19,18 @@
 #include "aidge/backend/cpu/operator/BatchNormImpl.hpp"
 #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
 #include "aidge/backend/cpu/operator/ConvImpl.hpp"
+#include "aidge/backend/cpu/operator/DivImpl.hpp"
 #include "aidge/backend/cpu/operator/FCImpl.hpp"
 #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
+#include "aidge/backend/cpu/operator/MulImpl.hpp"
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
+#include "aidge/backend/cpu/operator/PowImpl.hpp"
 #include "aidge/backend/cpu/operator/ProducerImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
-#include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 #include "aidge/backend/cpu/operator/ScalingImpl.hpp"
 #include "aidge/backend/cpu/operator/SqrtImpl.hpp"
+#include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
+#include "aidge/backend/cpu/operator/SubImpl.hpp"
 
 #endif /* AIDGE_CPU_IMPORTS_H_ */
\ No newline at end of file
diff --git a/include/aidge/backend/cpu/operator/DivImpl.hpp b/include/aidge/backend/cpu/operator/DivImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..655a9f6c8accb80fc85d8bc7bd9bf378d4f48a6b
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/DivImpl.hpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_DIVIMPL_H_
+#define AIDGE_CPU_OPERATOR_DIVIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Div.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// class Div_Op;
+
+// compute kernel registry for forward and backward
+class DivImplForward_cpu
+    : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
+};
+class DivImplBackward_cpu
+    : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
+};
+
+class DivImpl_cpu : public OperatorImpl {
+public:
+    DivImpl_cpu(const Div_Op& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<DivImpl_cpu> create(const Div_Op& op) {
+        return std::make_unique<DivImpl_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+namespace {
+static Registrar<Div_Op> registrarDivImpl_cpu("cpu", Aidge::DivImpl_cpu::create);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_DIVIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..e2ead9ca8de3ed8328b659906336766fbfbb6a47
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_DIVIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_DIVIMPL_FORWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/DivImpl.hpp"
+
+namespace Aidge {
+template <class I1, class I2, class O>
+void DivImpl_cpu_forward_kernel(std::size_t input1Length,
+                                     std::size_t input2Length,
+                                     const void* input1_,
+                                     const void* input2_,
+                                     void* output_) {
+
+    const I1* input_1 = static_cast<const I1*>(input1_);
+    const I2* input_2 = static_cast<const I2*>(input2_);
+    O* output = static_cast<O*>(output_);
+    if (input2Length == input1Length)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = input_1[i] / input_2[i];
+        }
+    }
+    else if (input2Length == 1)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = input_1[i] / input_2[0];
+        }
+    }
+    else // input_2 is 1d; broadcast along the last dimension of input_1 (index i % input2Length)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            std::size_t channelIdx = i % input2Length;
+            output[i] = input_1[i] / input_2[channelIdx];
+        }
+    }
+}
+
+namespace {
+static Registrar<DivImplForward_cpu> registrarDivImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32, DataType::Float32},
+        Aidge::DivImpl_cpu_forward_kernel<float, float, float>);
+static Registrar<DivImplForward_cpu> registrarDivImplForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32, DataType::Int32},
+        Aidge::DivImpl_cpu_forward_kernel<int, int, int>);
+static Registrar<DivImplForward_cpu> registrarDivImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64, DataType::Float64},
+        Aidge::DivImpl_cpu_forward_kernel<double, double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_DIVIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/MulImpl.hpp b/include/aidge/backend/cpu/operator/MulImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..54361e4f5f7a361032c9f4928392f18f183724ac
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/MulImpl.hpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_MULIMPL_H_
+#define AIDGE_CPU_OPERATOR_MULIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// class Mul_Op;
+
+// compute kernel registry for forward and backward
+class MulImplForward_cpu
+    : public Registrable<MulImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
+};
+class MulImplBackward_cpu
+    : public Registrable<MulImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
+};
+
+class MulImpl_cpu : public OperatorImpl {
+public:
+    MulImpl_cpu(const Mul_Op& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<MulImpl_cpu> create(const Mul_Op& op) {
+        return std::make_unique<MulImpl_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+namespace {
+static Registrar<Mul_Op> registrarMulImpl_cpu("cpu", Aidge::MulImpl_cpu::create);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_MULIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9caef8b88af3ca779309b60eba984a72db35f84a
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_MULIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_MULIMPL_FORWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/MulImpl.hpp"
+
+namespace Aidge {
+template <class I1, class I2, class O>
+void MulImpl_cpu_forward_kernel(std::size_t input1Length,
+                                     std::size_t input2Length,
+                                     const void* input1_,
+                                     const void* input2_,
+                                     void* output_) {
+
+    const I1* input_1 = static_cast<const I1*>(input1_);
+    const I2* input_2 = static_cast<const I2*>(input2_);
+    O* output = static_cast<O*>(output_);
+    if (input2Length == input1Length)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = input_1[i] * input_2[i];
+        }
+    }
+    else if (input2Length == 1)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = input_1[i] * input_2[0];
+        }
+    }
+    else // input_2 is 1d; broadcast along the last dimension of input_1 (index i % input2Length)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            std::size_t channelIdx = i % input2Length;
+            output[i] = input_1[i] * input_2[channelIdx];
+        }
+    }
+}
+
+namespace {
+static Registrar<MulImplForward_cpu> registrarMulImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32, DataType::Float32},
+        Aidge::MulImpl_cpu_forward_kernel<float, float, float>);
+static Registrar<MulImplForward_cpu> registrarMulImplForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32, DataType::Int32},
+        Aidge::MulImpl_cpu_forward_kernel<int, int, int>);
+static Registrar<MulImplForward_cpu> registrarMulImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64, DataType::Float64},
+        Aidge::MulImpl_cpu_forward_kernel<double, double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_MULIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/PowImpl.hpp b/include/aidge/backend/cpu/operator/PowImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c33fbf0ed4adf4a0206ce8ed32ffdce2cd9ad17c
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/PowImpl.hpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_POWIMPL_H_
+#define AIDGE_CPU_OPERATOR_POWIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Pow.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// class Pow_Op;
+
+// compute kernel registry for forward and backward
+class PowImplForward_cpu
+    : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
+};
+class PowImplBackward_cpu
+    : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
+};
+
+class PowImpl_cpu : public OperatorImpl {
+public:
+    PowImpl_cpu(const Pow_Op& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<PowImpl_cpu> create(const Pow_Op& op) {
+        return std::make_unique<PowImpl_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+namespace {
+static Registrar<Pow_Op> registrarPowImpl_cpu("cpu", Aidge::PowImpl_cpu::create);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_POWIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c9c5db7e9aef07d24ba8f80c94b8f2494865e004
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp
@@ -0,0 +1,66 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_POWIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_POWIMPL_FORWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+#include <cmath>
+
+#include "aidge/backend/cpu/operator/PowImpl.hpp"
+
+namespace Aidge {
+template <class I1, class I2, class O>
+void PowImpl_cpu_forward_kernel(std::size_t input1Length,
+                                     std::size_t input2Length,
+                                     const void* input1_,
+                                     const void* input2_,
+                                     void* output_) {
+
+    const I1* input_1 = static_cast<const I1*>(input1_);
+    const I2* input_2 = static_cast<const I2*>(input2_);
+    O* output = static_cast<O*>(output_);
+
+    if (input2Length == input1Length)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = std::pow(input_1[i], input_2[i]);
+        }
+    }
+    else if (input2Length == 1)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = std::pow(input_1[i], input_2[0]);
+        }
+    }
+    else // input_2 is 1d; broadcast along the last dimension of input_1 (index i % input2Length)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            std::size_t channelIdx = i % input2Length;
+            output[i] = std::pow(input_1[i], input_2[channelIdx]);
+        }
+    }
+}
+
+namespace {
+static Registrar<PowImplForward_cpu> registrarPowImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32, DataType::Float32},
+        Aidge::PowImpl_cpu_forward_kernel<float, float, float>);
+static Registrar<PowImplForward_cpu> registrarPowImplForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32, DataType::Int32},
+        Aidge::PowImpl_cpu_forward_kernel<int, int, int>);
+static Registrar<PowImplForward_cpu> registrarPowImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64, DataType::Float64},
+        Aidge::PowImpl_cpu_forward_kernel<double, double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_POWIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/SubImpl.hpp b/include/aidge/backend/cpu/operator/SubImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..08ec69e509b2b6c02e30f613abd83208de254f75
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/SubImpl.hpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_SUBIMPL_H_
+#define AIDGE_CPU_OPERATOR_SUBIMPL_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/operator/Sub.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+#include <memory>
+#include <vector>
+
+namespace Aidge {
+// class Sub_Op;
+
+// compute kernel registry for forward and backward
+class SubImplForward_cpu
+    : public Registrable<SubImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*,void*)> {
+};
+class SubImplBackward_cpu
+    : public Registrable<SubImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const void*, const void*, void*)> {
+};
+
+class SubImpl_cpu : public OperatorImpl {
+public:
+    SubImpl_cpu(const Sub_Op& op) : OperatorImpl(op) {}
+
+    static std::unique_ptr<SubImpl_cpu> create(const Sub_Op& op) {
+        return std::make_unique<SubImpl_cpu>(op);
+    }
+
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    void forward() override;
+};
+
+namespace {
+static Registrar<Sub_Op> registrarSubImpl_cpu("cpu", Aidge::SubImpl_cpu::create);
+}
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_SUBIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..08f2e24fa38d2739943279666187a55d7076a89b
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp
@@ -0,0 +1,65 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_SUBIMPL_FORWARD_KERNEL_H_
+#define AIDGE_CPU_OPERATOR_SUBIMPL_FORWARD_KERNEL_H_
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/SubImpl.hpp"
+
+namespace Aidge {
+template <class I1, class I2, class O>
+void SubImpl_cpu_forward_kernel(std::size_t input1Length,
+                                     std::size_t input2Length,
+                                     const void* input1_,
+                                     const void* input2_,
+                                     void* output_) {
+
+    const I1* input_1 = static_cast<const I1*>(input1_);
+    const I2* input_2 = static_cast<const I2*>(input2_);
+    O* output = static_cast<O*>(output_);
+
+    if (input2Length == input1Length)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = input_1[i] - input_2[i];
+        }
+    }
+    else if (input2Length == 1)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            output[i] = input_1[i] - input_2[0];
+        }
+    }
+    else // input_2 is 1d; broadcast along the last dimension of input_1 (index i % input2Length)
+    {
+        for (std::size_t i = 0; i < input1Length; ++i) {
+            std::size_t channelIdx = i % input2Length;
+            output[i] = input_1[i] - input_2[channelIdx];
+        }
+    }
+}
+
+namespace {
+static Registrar<SubImplForward_cpu> registrarSubImplForward_cpu_Float32(
+        {DataType::Float32, DataType::Float32, DataType::Float32},
+        Aidge::SubImpl_cpu_forward_kernel<float, float, float>);
+static Registrar<SubImplForward_cpu> registrarSubImplForward_cpu_Int32(
+        {DataType::Int32, DataType::Int32, DataType::Int32},
+        Aidge::SubImpl_cpu_forward_kernel<int, int, int>);
+static Registrar<SubImplForward_cpu> registrarSubImplForward_cpu_Float64(
+        {DataType::Float64, DataType::Float64, DataType::Float64},
+        Aidge::SubImpl_cpu_forward_kernel<double, double, double>);
+}  // namespace
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_SUBIMPL_FORWARD_KERNEL_H_ */
diff --git a/src/operator/DivImpl.cpp b/src/operator/DivImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f7cbc7d20b9126ab318a6989ebf627491cb247aa
--- /dev/null
+++ b/src/operator/DivImpl.cpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <chrono>  // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread>  // std::this_thread::sleep_for
+#include <vector>
+
+#include "aidge/operator/Div.hpp"
+#include "aidge/utils/Types.h"
+
+#include "aidge/backend/cpu/operator/DivImpl.hpp"
+#include "aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp"
+
+Aidge::NbElts_t Aidge::DivImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+
+void Aidge::DivImpl_cpu::forward() {
+    assert(mOp.getInput(0) && "missing input #0");
+    assert(mOp.getInput(1) && "missing input #1");
+
+    assert(((mOp.getInput(1)->size() == 1) || 
+            (mOp.getInput(1)->size() == mOp.getInput(0)->size()) ||
+            (mOp.getInput(1)->nbDims() == 1 && mOp.getInput(1)->size() == mOp.getInput(0)->dims()[mOp.getInput(0)->nbDims()-1])
+           ) &&
+           "input #1 must either be a tensor of size 1, the size of the last dimension of input #0 or the same size as input #0");
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<DivImplForward_cpu>::create({
+        mOp.getInput(0)->dataType(),
+        mOp.getInput(1)->dataType(),
+        mOp.getOutput(0)->dataType()});
+
+    // Call kernel
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
+        std::static_pointer_cast<Tensor>(mOp.getInput(1))->size(),
+        mOp.getInput(0)->getImpl()->rawPtr(),
+        mOp.getInput(1)->getImpl()->rawPtr(),
+        mOp.getOutput(0)->getImpl()->rawPtr());
+}
diff --git a/src/operator/MulImpl.cpp b/src/operator/MulImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b6eb245cf0b1afc8893dfbab13d3294b945b3e0e
--- /dev/null
+++ b/src/operator/MulImpl.cpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <chrono>  // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread>  // std::this_thread::sleep_for
+#include <vector>
+
+#include "aidge/operator/Mul.hpp"
+#include "aidge/utils/Types.h"
+
+#include "aidge/backend/cpu/operator/MulImpl.hpp"
+#include "aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp"
+
+Aidge::NbElts_t Aidge::MulImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+
+void Aidge::MulImpl_cpu::forward() {
+    assert(mOp.getInput(0) && "missing input #0");
+    assert(mOp.getInput(1) && "missing input #1");
+
+    assert(((mOp.getInput(1)->size() == 1) || 
+            (mOp.getInput(1)->size() == mOp.getInput(0)->size()) ||
+            (mOp.getInput(1)->nbDims() == 1 && mOp.getInput(1)->size() == mOp.getInput(0)->dims()[mOp.getInput(0)->nbDims()-1])
+           ) &&
+           "input #1 must either be a tensor of size 1, the size of the last dimension of input #0 or the same size as input #0");
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<MulImplForward_cpu>::create({
+        mOp.getInput(0)->dataType(),
+        mOp.getInput(1)->dataType(),
+        mOp.getOutput(0)->dataType()});
+
+    // Call kernel
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
+        std::static_pointer_cast<Tensor>(mOp.getInput(1))->size(),
+        mOp.getInput(0)->getImpl()->rawPtr(),
+        mOp.getInput(1)->getImpl()->rawPtr(),
+        mOp.getOutput(0)->getImpl()->rawPtr());
+}
diff --git a/src/operator/PowImpl.cpp b/src/operator/PowImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..52a4f46956e0d0f348583a23772c519a64ca857d
--- /dev/null
+++ b/src/operator/PowImpl.cpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <chrono>  // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread>  // std::this_thread::sleep_for
+#include <vector>
+
+#include "aidge/operator/Pow.hpp"
+#include "aidge/utils/Types.h"
+
+#include "aidge/backend/cpu/operator/PowImpl.hpp"
+#include "aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp"
+
+Aidge::NbElts_t Aidge::PowImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+
+void Aidge::PowImpl_cpu::forward() {
+    assert(mOp.getInput(0) && "missing input #0");
+    assert(mOp.getInput(1) && "missing input #1");
+
+    assert(((mOp.getInput(1)->size() == 1) || 
+            (mOp.getInput(1)->size() == mOp.getInput(0)->size()) ||
+            (mOp.getInput(1)->nbDims() == 1 && mOp.getInput(1)->size() == mOp.getInput(0)->dims()[mOp.getInput(0)->nbDims()-1])
+           ) &&
+           "input #1 must either be a tensor of size 1, the size of the last dimension of input #0 or the same size as input #0");
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<PowImplForward_cpu>::create({
+        mOp.getInput(0)->dataType(),
+        mOp.getInput(1)->dataType(),
+        mOp.getOutput(0)->dataType()});
+
+    // Call kernel
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
+        std::static_pointer_cast<Tensor>(mOp.getInput(1))->size(),
+        mOp.getInput(0)->getImpl()->rawPtr(),
+        mOp.getInput(1)->getImpl()->rawPtr(),
+        mOp.getOutput(0)->getImpl()->rawPtr());
+}
diff --git a/src/operator/SubImpl.cpp b/src/operator/SubImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6d87821d89ff84aa1046a9ecf0fdd83dcc5dda53
--- /dev/null
+++ b/src/operator/SubImpl.cpp
@@ -0,0 +1,51 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <chrono>  // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread>  // std::this_thread::sleep_for
+#include <vector>
+
+#include "aidge/operator/Sub.hpp"
+#include "aidge/utils/Types.h"
+
+#include "aidge/backend/cpu/operator/SubImpl.hpp"
+#include "aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp"
+
+Aidge::NbElts_t Aidge::SubImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
+    // this implementation can be in-place
+    return 0;
+}
+
+void Aidge::SubImpl_cpu::forward() {
+    assert(mOp.getInput(0) && "missing input #0");
+    assert(mOp.getInput(1) && "missing input #1");
+
+    assert(((mOp.getInput(1)->size() == 1) || 
+            (mOp.getInput(1)->size() == mOp.getInput(0)->size()) ||
+            (mOp.getInput(1)->nbDims() == 1 && mOp.getInput(1)->size() == mOp.getInput(0)->dims()[mOp.getInput(0)->nbDims()-1])
+           ) &&
+           "input #1 must either be a tensor of size 1, the size of the last dimension of input #0 or the same size as input #0");
+
+    // Find the correct kernel type
+    auto kernelFunc = Registrar<SubImplForward_cpu>::create({
+        mOp.getInput(0)->dataType(),
+        mOp.getInput(1)->dataType(),
+        mOp.getOutput(0)->dataType()});
+
+    // Call kernel
+    kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
+        std::static_pointer_cast<Tensor>(mOp.getInput(1))->size(),
+        mOp.getInput(0)->getImpl()->rawPtr(),
+        mOp.getInput(1)->getImpl()->rawPtr(),
+        mOp.getOutput(0)->getImpl()->rawPtr());
+}
diff --git a/unit_tests/operator/Test_DivImpl.cpp b/unit_tests/operator/Test_DivImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c33319c88b63ee834bbcb388bbbe0775699edbd7
--- /dev/null
+++ b/unit_tests/operator/Test_DivImpl.cpp
@@ -0,0 +1,207 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Div.hpp"
+
+#include "aidge/backend/cpu.hpp"
+
+#include <memory>
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Div(forward)") {
+    SECTION("2D Tensor by Singleton") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.07607108, 0.44075000},
+                {0.19494885, 0.20071143}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{0.5}});
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.15214217, 0.88150001},
+                {0.38989770, 0.40142286}
+            }
+        });
+
+        std::shared_ptr<Node> myDiv = Div();
+        myDiv->getOperator()->setDatatype(DataType::Float32);
+        myDiv->getOperator()->setBackend("cpu");
+        myDiv->getOperator()->associateInput(0, input_1);
+        myDiv->getOperator()->associateInput(1, input_2);
+        myDiv->getOperator()->computeOutputDims();
+        myDiv->forward();
+
+        float* resPtr = static_cast<float*>(myDiv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 4; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+
+    SECTION("2D Tensors") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.79780143, 0.49322051},
+                {0.84239346, 0.83737719}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
+            {
+                {0.59088874, 0.78858775},
+                {0.42879432, 0.17615074}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {1.35017204, 0.62544787},
+                {1.96456301, 4.75375366}
+            }
+        });
+
+        std::shared_ptr<Node> myDiv = Div();
+        myDiv->getOperator()->setDatatype(DataType::Float32);
+        myDiv->getOperator()->setBackend("cpu");
+        myDiv->getOperator()->associateInput(0, input_1);
+        myDiv->getOperator()->associateInput(1, input_2);
+        myDiv->getOperator()->computeOutputDims();
+        myDiv->forward();
+
+        float* resPtr = static_cast<float*>(myDiv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 4; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+
+    SECTION("3D Tensor by 1D Tensor") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> {
+            {
+                {{0.24180168, 0.44319558, 0.06437260},
+                 {0.21270001, 0.34570599, 0.44151264}},
+
+                {{0.62294692, 0.98043168, 0.18628585},
+                 {0.33591706, 0.03432965, 0.32130069}}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array1D<float,3>{
+            {0.63475525, 0.58620811, 0.69340748}
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> {
+            {
+                {{0.38093686, 0.75603795, 0.09283517},
+                 {0.33508980, 0.58973253, 0.63672900}},
+
+                {{0.98139703, 1.67249763, 0.26865280},
+                 {0.52920723, 0.05856223, 0.46336490}}
+            }
+        });
+
+        std::shared_ptr<Node> myDiv = Div();
+        myDiv->getOperator()->setDatatype(DataType::Float32);
+        myDiv->getOperator()->setBackend("cpu");
+        myDiv->getOperator()->associateInput(0, input_1);
+        myDiv->getOperator()->associateInput(1, input_2);
+        myDiv->getOperator()->computeOutputDims();
+        myDiv->forward();
+
+        float* resPtr = static_cast<float*>(myDiv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 12; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+
+    SECTION("4D Tensor") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
+            {
+                {
+                    {{0.25675946, 0.36265653, 0.22386390},
+                     {0.30483031, 0.97449398, 0.73871714},
+                     {0.36169255, 0.04510212, 0.27525920}},
+
+                    {{0.73255682, 0.03885978, 0.24181491},
+                    {0.14465559, 0.86070061, 0.88848090},
+                    {0.74408931, 0.87412918, 0.19800508}},
+
+                    {{0.43551809, 0.73437816, 0.37513995},
+                     {0.25414777, 0.06396711, 0.98708153},
+                     {0.02140611, 0.84974837, 0.62108254}}
+                },
+                {
+                    {{0.86227137, 0.69357753, 0.41814715},
+                     {0.76048166, 0.46306920, 0.05907208},
+                     {0.76625377, 0.91793799, 0.92988223}},
+
+                    {{0.34362513, 0.85009813, 0.21107805},
+                     {0.65575773, 0.38140792, 0.48540717},
+                     {0.10045588, 0.85803932, 0.23778951}},
+
+                    {{0.30316389, 0.04176688, 0.17290735},
+                     {0.07942408, 0.48647392, 0.39440966},
+                     {0.26543915, 0.92589515, 0.83948994}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{3.0}});
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
+            {
+                {
+                    {{0.08558649, 0.12088551, 0.07462130},
+                     {0.10161010, 0.32483134, 0.24623905},
+                     {0.12056419, 0.01503404, 0.09175307}},
+
+                    {{0.24418561, 0.01295326, 0.08060497},
+                     {0.04821853, 0.28690019, 0.29616031},
+                     {0.24802977, 0.29137638, 0.06600169}},
+
+                    {{0.14517270, 0.24479271, 0.12504666},
+                     {0.08471593, 0.02132237, 0.32902718},
+                     {0.00713537, 0.28324947, 0.20702751}}
+                },
+                {
+                    {{0.28742379, 0.23119251, 0.13938238},
+                     {0.25349388, 0.15435641, 0.01969069},
+                     {0.25541791, 0.30597934, 0.30996075}},
+
+                    {{0.11454171, 0.28336605, 0.07035935},
+                     {0.21858591, 0.12713598, 0.16180240},
+                     {0.03348529, 0.28601310, 0.07926317}},
+
+                    {{0.10105463, 0.01392229, 0.05763578},
+                     {0.02647469, 0.16215797, 0.13146989},
+                     {0.08847972, 0.30863172, 0.27982998}}
+                }
+            }
+        });
+
+        std::shared_ptr<Node> myDiv = Div();
+        myDiv->getOperator()->setDatatype(DataType::Float32);
+        myDiv->getOperator()->setBackend("cpu");
+        myDiv->getOperator()->associateInput(0, input_1);
+        myDiv->getOperator()->associateInput(1, input_2);
+        myDiv->getOperator()->computeOutputDims();
+        myDiv->forward();
+
+        float* resPtr = static_cast<float*>(myDiv->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 54; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_MulImpl.cpp b/unit_tests/operator/Test_MulImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cea62f998cfc538d1d5800639e461eb4d15cb270
--- /dev/null
+++ b/unit_tests/operator/Test_MulImpl.cpp
@@ -0,0 +1,129 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Mul.hpp"
+
+#include "aidge/backend/cpu.hpp"
+
+#include <memory>
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Mul(forward)") {
+    SECTION("2D Tensor by Singleton") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.38977361, 0.34064174},
+                {0.00427264, 0.90872520}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{3.0}});
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {1.16932082, 1.02192521},
+                {0.01281792, 2.72617555}
+            }
+        });
+
+        std::shared_ptr<Node> myMul = Mul();
+        myMul->getOperator()->setDatatype(DataType::Float32);
+        myMul->getOperator()->setBackend("cpu");
+        myMul->getOperator()->associateInput(0, input_1);
+        myMul->getOperator()->associateInput(1, input_2);
+        myMul->getOperator()->computeOutputDims();
+        myMul->forward();
+
+        float* resPtr = static_cast<float*>(myMul->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 4; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+
+    SECTION("2D Tensors") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.38977361, 0.34064174},
+                {0.00427264, 0.90872520}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
+            {
+                {0.02362096, 0.24084556},
+                {0.94690859, 0.13512510}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.00920683, 0.08204205},
+                {0.00404580, 0.12279158}
+            }
+        });
+
+        std::shared_ptr<Node> myMul = Mul();
+        myMul->getOperator()->setDatatype(DataType::Float32);
+        myMul->getOperator()->setBackend("cpu");
+        myMul->getOperator()->associateInput(0, input_1);
+        myMul->getOperator()->associateInput(1, input_2);
+        myMul->getOperator()->computeOutputDims();
+        myMul->forward();
+
+        float* resPtr = static_cast<float*>(myMul->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 4; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+
+    SECTION("3D Tensor by 1D Tensor") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> {
+            {
+                {{0.33647752, 0.89360154, 0.46586215},
+                 {0.71518236, 0.71481097, 0.97991812}},
+
+                {{0.17393428, 0.56849813, 0.18489265},
+                 {0.78397650, 0.00348300, 0.65758008}}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array1D<float,3>{
+            {0.15380561, 0.51063120, 0.93031412}
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> {
+            {
+                {{0.05175213, 0.45630082, 0.43339813},
+                 {0.10999906, 0.36500478, 0.91163164}},
+
+                {{0.02675207, 0.29029289, 0.17200825},
+                 {0.12057999, 0.00177853, 0.61175603}}
+            }
+        });
+
+        std::shared_ptr<Node> myMul = Mul();
+        myMul->getOperator()->setDatatype(DataType::Float32);
+        myMul->getOperator()->setBackend("cpu");
+        myMul->getOperator()->associateInput(0, input_1);
+        myMul->getOperator()->associateInput(1, input_2);
+        myMul->getOperator()->computeOutputDims();
+        myMul->forward();
+
+        float* resPtr = static_cast<float*>(myMul->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 12; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_PowImpl.cpp b/unit_tests/operator/Test_PowImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7293198f411510904ee73aced47b69dfc37374af
--- /dev/null
+++ b/unit_tests/operator/Test_PowImpl.cpp
@@ -0,0 +1,203 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Pow.hpp"
+
+#include "aidge/backend/cpu.hpp"
+
+#include <memory>
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Pow(forward)") {
+    SECTION("2D Tensor by Singleton") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.42139274, 0.51524192},
+                {0.85247433, 0.13432795}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{2.0}});
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.17757183, 0.26547423},
+                {0.72671247, 0.01804400}
+            }
+        });
+
+        std::shared_ptr<Node> myPow = Pow();
+        myPow->getOperator()->setDatatype(DataType::Float32);
+        myPow->getOperator()->setBackend("cpu");
+        myPow->getOperator()->associateInput(0, input_1);
+        myPow->getOperator()->associateInput(1, input_2);
+        myPow->getOperator()->computeOutputDims();
+        myPow->forward();
+
+        float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 4; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+
+    SECTION("3D Tensor by 1D Tensor") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> {
+            {
+                {{0.87519985, 0.10536593, 0.20268351},
+                 {0.75532353, 0.95977652, 0.03897029}},
+
+                {{0.67554104, 0.35499334, 0.27741563},
+                 {0.94270861, 0.48397779, 0.35532343}}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array1D<float,3>{
+            {0.39333701, 0.08719915, 0.16713941}
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> {
+            {
+                {{0.94891787, 0.82182676, 0.76584703},
+                 {0.89549923, 0.99642646, 0.58137459}},
+
+                {{0.85702944, 0.91364944, 0.80709606},
+                 {0.97706109, 0.93867886, 0.84118503}}
+            }
+        });
+
+        std::shared_ptr<Node> myPow = Pow();
+        myPow->getOperator()->setDatatype(DataType::Float32);
+        myPow->getOperator()->setBackend("cpu");
+        myPow->getOperator()->associateInput(0, input_1);
+        myPow->getOperator()->associateInput(1, input_2);
+        myPow->getOperator()->computeOutputDims();
+        myPow->forward();
+
+        float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 12; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+
+    SECTION("2D Tensors") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.79780143, 0.49322051},
+                {0.84239346, 0.83737719}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
+            {
+                {0.59088874, 0.78858775},
+                {0.42879432, 0.17615074}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.87504572, 0.57271165},
+                {0.92909741, 0.96922028}
+            }
+        });
+
+        std::shared_ptr<Node> myPow = Pow();
+        myPow->getOperator()->setDatatype(DataType::Float32);
+        myPow->getOperator()->setBackend("cpu");
+        myPow->getOperator()->associateInput(0, input_1);
+        myPow->getOperator()->associateInput(1, input_2);
+        myPow->getOperator()->computeOutputDims();
+        myPow->forward();
+
+        float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 4; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+
+    SECTION("4D Tensor") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
+            {
+                {
+                    {{0.80191749, 0.45388508, 0.86550850},
+                     {0.47226250, 0.55809456, 0.59451854},
+                     {0.45497441, 0.02653158, 0.44041735}},
+                    {{0.30726379, 0.73146582, 0.46462774},
+                     {0.30268502, 0.78075552, 0.65154958},
+                     {0.91332406, 0.62448132, 0.53238851}},
+                    {{0.13917381, 0.43061519, 0.30198061},
+                     {0.12880909, 0.08995515, 0.29609048},
+                     {0.46449280, 0.47559714, 0.24193990}}
+                },
+                {
+                    {{0.87349969, 0.51625526, 0.16921073},
+                     {0.95035923, 0.10066575, 0.56729180},
+                     {0.84686232, 0.05965143, 0.03635806}},
+                    {{0.61107808, 0.59954077, 0.45627308},
+                     {0.84114522, 0.77186388, 0.37427086},
+                     {0.13415480, 0.00617349, 0.84260136}},
+                    {{0.55090177, 0.57292056, 0.29158932},
+                     {0.67131883, 0.96988875, 0.69545972},
+                     {0.80979776, 0.18238151, 0.19527155}}
+                }
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{2.0}});
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array4D<float,2,3,3,3> {
+            {
+                {
+                    {{6.43071651e-01, 2.06011668e-01, 7.49104977e-01},
+                     {2.23031864e-01, 3.11469525e-01, 3.53452295e-01},
+                     {2.07001716e-01, 7.03924568e-04, 1.93967447e-01}},
+
+                    {{9.44110379e-02, 5.35042226e-01, 2.15878934e-01},
+                     {9.16182250e-02, 6.09579206e-01, 4.24516857e-01},
+                     {8.34160864e-01, 3.89976919e-01, 2.83437520e-01}},
+
+                    {{1.93693489e-02, 1.85429439e-01, 9.11922902e-02},
+                     {1.65917836e-02, 8.09192937e-03, 8.76695737e-02},
+                     {2.15753555e-01, 2.26192638e-01, 5.85349165e-02}}
+                },
+                {
+                    {{7.63001740e-01, 2.66519487e-01, 2.86322720e-02},
+                     {9.03182685e-01, 1.01335924e-02, 3.21819991e-01},
+                     {7.17175782e-01, 3.55829368e-03, 1.32190844e-03}},
+
+                    {{3.73416424e-01, 3.59449148e-01, 2.08185121e-01},
+                     {7.07525253e-01, 5.95773816e-01, 1.40078679e-01},
+                     {1.79975089e-02, 3.81119971e-05, 7.09977031e-01}},
+
+                    {{3.03492755e-01, 3.28237981e-01, 8.50243345e-02},
+                     {4.50668961e-01, 9.40684199e-01, 4.83664215e-01},
+                     {6.55772448e-01, 3.32630165e-02, 3.81309800e-02}}
+                }
+            }
+        });
+
+        std::shared_ptr<Node> myPow = Pow();
+        myPow->getOperator()->setDatatype(DataType::Float32);
+        myPow->getOperator()->setBackend("cpu");
+        myPow->getOperator()->associateInput(0, input_1);
+        myPow->getOperator()->associateInput(1, input_2);
+        myPow->getOperator()->computeOutputDims();
+        myPow->forward();
+
+        float* resPtr = static_cast<float*>(myPow->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 54; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+    }
+}
\ No newline at end of file
diff --git a/unit_tests/operator/Test_SubImpl.cpp b/unit_tests/operator/Test_SubImpl.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d741602cf02958a88bb41bbd2927577027acb069
--- /dev/null
+++ b/unit_tests/operator/Test_SubImpl.cpp
@@ -0,0 +1,129 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Sub.hpp"
+
+#include "aidge/backend/cpu.hpp"
+
+#include <memory>
+
+using namespace Aidge;
+
+TEST_CASE("[cpu/operator] Sub(forward)") {
+    SECTION("2D Tensor by Singleton") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.34234560, 0.92812711},
+                {0.73706615, 0.69953883}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,1,1>{{2.5}});
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {-2.15765429, -1.57187295},
+                {-1.76293385, -1.80046117}
+            }
+        });
+
+        std::shared_ptr<Node> mySub = Sub();
+        mySub->getOperator()->setDatatype(DataType::Float32);
+        mySub->getOperator()->setBackend("cpu");
+        mySub->getOperator()->associateInput(0, input_1);
+        mySub->getOperator()->associateInput(1, input_2);
+        mySub->getOperator()->computeOutputDims();
+        mySub->forward();
+
+        float* resPtr = static_cast<float*>(mySub->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 4; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+
+    SECTION("2D Tensors") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {0.34234560, 0.92812711},
+                {0.73706615, 0.69953883}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array2D<float,2,2>{
+            {
+                {0.61729127, 0.83004373},
+                {0.72002089, 0.52473849}
+            }
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array2D<float,2,2> {
+            {
+                {-0.27494568,  0.09808338},
+                {0.01704526,  0.17480034}
+            }
+        });
+
+        std::shared_ptr<Node> mySub = Sub();
+        mySub->getOperator()->setDatatype(DataType::Float32);
+        mySub->getOperator()->setBackend("cpu");
+        mySub->getOperator()->associateInput(0, input_1);
+        mySub->getOperator()->associateInput(1, input_2);
+        mySub->getOperator()->computeOutputDims();
+        mySub->forward();
+
+        float* resPtr = static_cast<float*>(mySub->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 4; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+
+    SECTION("3D Tensor by 1D Tensor") {
+        std::shared_ptr<Tensor> input_1 = std::make_shared<Tensor>(Array3D<float,2,2,3> {
+            {
+                {{0.84181279, 0.20655948, 0.09750116},
+                 {0.37723488, 0.73120135, 0.04666907}},
+
+                {{0.91483921, 0.93985939, 0.58823180},
+                 {0.39963132, 0.67879969, 0.33209187}}
+            }
+        });
+        std::shared_ptr<Tensor> input_2 =  std::make_shared<Tensor>(Array1D<float,3>{
+            {0.04784805, 0.91903114, 0.38606840}
+        });
+        std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array3D<float,2,2,3> {
+            {
+                {{0.79396474, -0.71247166, -0.28856725},
+                 {0.32938683, -0.18782979, -0.33939934}},
+
+                {{0.86699116,  0.02082825,  0.20216340},
+                 {0.35178328, -0.24023145, -0.05397654}}
+            }
+        });
+
+        std::shared_ptr<Node> mySub = Sub();
+        mySub->getOperator()->setDatatype(DataType::Float32);
+        mySub->getOperator()->setBackend("cpu");
+        mySub->getOperator()->associateInput(0, input_1);
+        mySub->getOperator()->associateInput(1, input_2);
+        mySub->getOperator()->computeOutputDims();
+        mySub->forward();
+
+        float* resPtr = static_cast<float*>(mySub->getOperator()->getOutput(0)->getImpl()->rawPtr());
+        float* expectedPtr = static_cast<float*>(expectedOutput->getImpl()->rawPtr());
+        for (std::size_t i = 0; i< 12; ++i) {
+            REQUIRE(std::abs(resPtr[i]-expectedPtr[i]) < 0.00001);
+        }
+
+    }
+}
\ No newline at end of file