From f203e3acb570ec22423602f036f2db24022f0425 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Tue, 10 Sep 2024 18:32:43 +0200
Subject: [PATCH] Finish adaptation of CPU operator implementations to the new registration API

---
 .../aidge/backend/cpu/operator/AbsImpl.hpp    |  31 +----
 ...orward_kernels.hpp => AbsImpl_kernels.hpp} |  24 ++--
 .../aidge/backend/cpu/operator/AddImpl.hpp    |  30 +----
 ...orward_kernels.hpp => AddImpl_kernels.hpp} |  29 +++--
 .../aidge/backend/cpu/operator/AndImpl.hpp    |  29 +----
 ...orward_kernels.hpp => AndImpl_kernels.hpp} |  33 +++--
 .../aidge/backend/cpu/operator/ArgMaxImpl.hpp |  44 ++-----
 ...ard_kernels.hpp => ArgMaxImpl_kernels.hpp} |  24 ++--
 .../backend/cpu/operator/AvgPoolingImpl.hpp   |  49 ++------
 ...kernels.hpp => AvgPoolingImpl_kernels.hpp} |  27 ++--
 .../backend/cpu/operator/BatchNormImpl.hpp    |  63 +++-------
 ..._kernels.hpp => BatchNormImpl_kernels.hpp} |  19 ++-
 .../cpu/operator/ConvDepthWiseImpl.hpp        | 100 ++++-----------
 ...nels.hpp => ConvDepthWiseImpl_kernels.hpp} |  48 ++++---
 .../aidge/backend/cpu/operator/ConvImpl.hpp   |  32 ++---
 .../backend/cpu/operator/ConvImpl_kernels.hpp |   2 +
 .../aidge/backend/cpu/operator/DivImpl.hpp    |  32 +----
 ...orward_kernels.hpp => DivImpl_kernels.hpp} |  29 ++---
 .../aidge/backend/cpu/operator/ErfImpl.hpp    |  31 +----
 ...orward_kernels.hpp => ErfImpl_kernels.hpp} |  24 ++--
 include/aidge/backend/cpu/operator/FCImpl.hpp |  68 ++++------
 .../cpu/operator/FCImpl_backward_kernels.hpp  |  92 --------------
 ...forward_kernels.hpp => FCImpl_kernels.hpp} |  81 +++++++++---
 .../aidge/backend/cpu/operator/FoldImpl.hpp   |  41 +++---
 ...rward_kernels.hpp => FoldImpl_kernels.hpp} |  27 ++--
 .../cpu/operator/GlobalAveragePoolingImpl.hpp |  35 +-----
 ...p => GlobalAveragePoolingImpl_kernels.hpp} |  31 ++---
 .../backend/cpu/operator/LeakyReLUImpl.hpp    |  49 +++-----
 .../LeakyReLUImpl_backward_kernels.hpp        |  45 -------
 .../LeakyReLUImpl_forward_kernels.hpp         |  45 -------
 .../cpu/operator/LeakyReLUImpl_kernels.hpp    |  62 ++++++++++
 include/aidge/backend/cpu/operator/LnImpl.hpp |  35 ++----
 .../cpu/operator/LnImpl_forward_kernels.hpp   |  47 -------
 ...ackward_kernels.hpp => LnImpl_kernels.hpp} | 117 ++++++++++--------
 .../aidge/backend/cpu/operator/MatMulImpl.hpp |  31 ++---
 ...ard_kernels.hpp => MatMulImpl_kernels.hpp} |  28 ++---
 .../backend/cpu/operator/MaxPoolingImpl.hpp   |  42 ++-----
 ...kernels.hpp => MaxPoolingImpl_kernels.hpp} |  27 ++--
 .../aidge/backend/cpu/operator/MulImpl.hpp    |  31 +----
 ...orward_kernels.hpp => MulImpl_kernels.hpp} |  33 +++--
 .../aidge/backend/cpu/operator/PadImpl.hpp    |   5 +-
 src/operator/AbsImpl.cpp                      |  15 ++-
 src/operator/AddImpl.cpp                      |  59 ++++-----
 src/operator/AndImpl.cpp                      |  20 +--
 src/operator/ArgMaxImpl.cpp                   |  15 ++-
 src/operator/AvgPoolingImpl.cpp               |  15 ++-
 src/operator/BatchNormImpl.cpp                |  16 ++-
 src/operator/ConvDepthWiseImpl.cpp            |  42 +++----
 src/operator/ConvImpl.cpp                     |   2 +
 src/operator/DivImpl.cpp                      |  17 +--
 src/operator/ErfImpl.cpp                      |  15 ++-
 src/operator/FCImpl.cpp                       |  46 ++-----
 src/operator/FoldImpl.cpp                     |  15 ++-
 src/operator/GlobalAveragePoolingImpl.cpp     |  17 ++-
 src/operator/LeakyReLUImpl.cpp                |  17 ++-
 src/operator/LnImpl.cpp                       |  19 ++-
 src/operator/MatMulImpl.cpp                   |  14 ++-
 src/operator/MaxPoolingImpl.cpp               |  15 ++-
 src/operator/MulImpl.cpp                      |  19 +--
 59 files changed, 777 insertions(+), 1273 deletions(-)
 rename include/aidge/backend/cpu/operator/{AbsImpl_forward_kernels.hpp => AbsImpl_kernels.hpp} (57%)
 rename include/aidge/backend/cpu/operator/{AddImpl_forward_kernels.hpp => AddImpl_kernels.hpp} (62%)
 rename include/aidge/backend/cpu/operator/{AndImpl_forward_kernels.hpp => AndImpl_kernels.hpp} (61%)
 rename include/aidge/backend/cpu/operator/{ArgMaxImpl_forward_kernels.hpp => ArgMaxImpl_kernels.hpp} (77%)
 rename include/aidge/backend/cpu/operator/{AvgPoolingImpl_forward_kernels.hpp => AvgPoolingImpl_kernels.hpp} (85%)
 rename include/aidge/backend/cpu/operator/{BatchNormImpl_forward_kernels.hpp => BatchNormImpl_kernels.hpp} (90%)
 rename include/aidge/backend/cpu/operator/{ConvDepthWiseImpl_forward_kernels.hpp => ConvDepthWiseImpl_kernels.hpp} (83%)
 rename include/aidge/backend/cpu/operator/{DivImpl_forward_kernels.hpp => DivImpl_kernels.hpp} (77%)
 rename include/aidge/backend/cpu/operator/{ErfImpl_forward_kernels.hpp => ErfImpl_kernels.hpp} (57%)
 delete mode 100644 include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp
 rename include/aidge/backend/cpu/operator/{FCImpl_forward_kernels.hpp => FCImpl_kernels.hpp} (63%)
 rename include/aidge/backend/cpu/operator/{FoldImpl_forward_kernels.hpp => FoldImpl_kernels.hpp} (80%)
 rename include/aidge/backend/cpu/operator/{GlobalAveragePoolingImpl_forward_kernels.hpp => GlobalAveragePoolingImpl_kernels.hpp} (68%)
 delete mode 100644 include/aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp
 delete mode 100644 include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp
 create mode 100644 include/aidge/backend/cpu/operator/LeakyReLUImpl_kernels.hpp
 delete mode 100755 include/aidge/backend/cpu/operator/LnImpl_forward_kernels.hpp
 rename include/aidge/backend/cpu/operator/{LnImpl_backward_kernels.hpp => LnImpl_kernels.hpp} (50%)
 rename include/aidge/backend/cpu/operator/{MatMulImpl_forward_kernels.hpp => MatMulImpl_kernels.hpp} (62%)
 rename include/aidge/backend/cpu/operator/{MaxPoolingImpl_forward_kernels.hpp => MaxPoolingImpl_kernels.hpp} (91%)
 rename include/aidge/backend/cpu/operator/{MulImpl_forward_kernels.hpp => MulImpl_kernels.hpp} (61%)

diff --git a/include/aidge/backend/cpu/operator/AbsImpl.hpp b/include/aidge/backend/cpu/operator/AbsImpl.hpp
index e53b3154..8233d47c 100644
--- a/include/aidge/backend/cpu/operator/AbsImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AbsImpl.hpp
@@ -12,7 +12,7 @@
 #ifndef AIDGE_CPU_OPERATOR_ABSIMPL_H_
 #define AIDGE_CPU_OPERATOR_ABSIMPL_H_
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/Abs.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
@@ -20,31 +20,12 @@
 #include <vector>
 
 namespace Aidge {
-// class Abs_Op;
+// Operator implementation entry point for the backend
+using AbsImpl_cpu = OperatorImpl_cpu<Abs_Op,
+    void(const std::size_t, const void*, void*)>;
 
-// compute kernel registry for forward and backward
-class AbsImplForward_cpu
-    : public Registrable<AbsImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
-};
-class AbsImplBackward_cpu
-    : public Registrable<AbsImplBackward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
-};
-
-class AbsImpl_cpu : public OperatorImpl {
-public:
-    AbsImpl_cpu(const Abs_Op& op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<AbsImpl_cpu> create(const Abs_Op& op) {
-        return std::make_unique<AbsImpl_cpu>(op);
-    }
-
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
-    void forward() override;
-};
-
-namespace {
-static Registrar<Abs_Op> registrarAbsImpl_cpu("cpu", Aidge::AbsImpl_cpu::create);
-}
+// Implementation entry point registration to Operator
+REGISTRAR(Abs_Op, "cpu", Aidge::AbsImpl_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_ABSIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/AbsImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AbsImpl_kernels.hpp
similarity index 57%
rename from include/aidge/backend/cpu/operator/AbsImpl_forward_kernels.hpp
rename to include/aidge/backend/cpu/operator/AbsImpl_kernels.hpp
index 4922d627..16e5f9de 100644
--- a/include/aidge/backend/cpu/operator/AbsImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AbsImpl_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CPU_OPERATOR_ABSIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_ABSIMPL_FORWARD_KERNEL_H_
+#ifndef AIDGE_CPU_OPERATOR_ABSIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_ABSIMPL_KERNELS_H_
 
 #include <cmath>
 
@@ -32,14 +32,16 @@ void AbsImpl_cpu_forward_kernel(std::size_t inputLenght,
     }
 }
 
-namespace {
-static Registrar<AbsImplForward_cpu> registrarAbsImplForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::AbsImpl_cpu_forward_kernel<float, float>);
-static Registrar<AbsImplForward_cpu> registrarAbsImplForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::AbsImpl_cpu_forward_kernel<int, int>);
-static Registrar<AbsImplForward_cpu> registrarAbsImplForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::AbsImpl_cpu_forward_kernel<double, double>);
-}  // namespace
+// Kernels registration to implementation entry point
+REGISTRAR(AbsImpl_cpu,
+    {DataType::Float32},
+    {ProdConso::inPlaceModel, Aidge::AbsImpl_cpu_forward_kernel<float, float>, nullptr});
+REGISTRAR(AbsImpl_cpu,
+    {DataType::Float64},
+    {ProdConso::inPlaceModel, Aidge::AbsImpl_cpu_forward_kernel<double, double>, nullptr});
+REGISTRAR(AbsImpl_cpu,
+    {DataType::Int32},
+    {ProdConso::inPlaceModel, Aidge::AbsImpl_cpu_forward_kernel<std::int32_t, std::int32_t>, nullptr});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_ABSIMPL_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_ABSIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/AddImpl.hpp b/include/aidge/backend/cpu/operator/AddImpl.hpp
index f71e3fcf..5e795922 100644
--- a/include/aidge/backend/cpu/operator/AddImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl.hpp
@@ -17,36 +17,18 @@
 #include <string>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/Add.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
+using AddImpl_cpu = OperatorImpl_cpu<Add_Op,
+    void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)>;
 
-// compute kernel registry for forward and backward
-class AddImplForward_cpu
-    : public Registrable<AddImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)>> {};
-
-class AddImplBackward_cpu
-    : public Registrable<AddImplBackward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)>> {};
-
-
-class AddImpl_cpu : public OperatorImpl {
-public:
-    AddImpl_cpu(const Add_Op& op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<AddImpl_cpu> create(const Add_Op& op) {
-        return std::make_unique<AddImpl_cpu>(op);
-    }
-
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
-    void forward() override;
-};
-
-namespace {
-static Registrar<Add_Op> registrarAddImpl_cpu("cpu", Aidge::AddImpl_cpu::create);
-}  // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(Add_Op, "cpu", Aidge::AddImpl_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_ADDIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AddImpl_kernels.hpp
similarity index 62%
rename from include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp
rename to include/aidge/backend/cpu/operator/AddImpl_kernels.hpp
index 94b22dcc..141136bb 100644
--- a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CPU_OPERATOR_ADDIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_ADDIMPL_FORWARD_KERNEL_H_
+#ifndef AIDGE_CPU_OPERATOR_ADDIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_ADDIMPL_KERNELS_H_
 
 #include "aidge/utils/Registrar.hpp"
 
@@ -41,16 +41,19 @@ void AddImpl_cpu_forward_kernel(const std::vector<const void*> inputs_, const st
 	}
 }
 
-namespace {
-static Registrar<AddImplForward_cpu> registrarAddImplForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::AddImpl_cpu_forward_kernel<float, float>);
-static Registrar<AddImplForward_cpu> registrarAddImplForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::AddImpl_cpu_forward_kernel<double, double>);
-static Registrar<AddImplForward_cpu> registrarAddImplForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::AddImpl_cpu_forward_kernel<std::int32_t, std::int32_t>);
-static Registrar<AddImplForward_cpu> registrarAddImplForward_cpu_Int64(
-        {DataType::Int64, DataType::Int64}, Aidge::AddImpl_cpu_forward_kernel<std::int64_t, std::int64_t>);
-}  // namespace
+// Kernels registration to implementation entry point
+REGISTRAR(AddImpl_cpu,
+    {{DataType::Any}, {DataType::Float32}},
+    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<float, float>, nullptr});
+REGISTRAR(AddImpl_cpu,
+    {{DataType::Any}, {DataType::Float64}},
+    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<double, double>, nullptr});
+REGISTRAR(AddImpl_cpu,
+    {{DataType::Any}, {DataType::Int32}},
+    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<std::int32_t, std::int32_t>, nullptr});
+REGISTRAR(AddImpl_cpu,
+    {{DataType::Any}, {DataType::Int64}},
+    {ProdConso::inPlaceModel, Aidge::AddImpl_cpu_forward_kernel<std::int64_t, std::int64_t>, nullptr});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_ADDIMPL_CPU_FORWARD_KERNEL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CPU_OPERATOR_ADDIMPL_CPU_KERNELS_H_ */
\ No newline at end of file
diff --git a/include/aidge/backend/cpu/operator/AndImpl.hpp b/include/aidge/backend/cpu/operator/AndImpl.hpp
index fd8cebbc..316a2fb9 100644
--- a/include/aidge/backend/cpu/operator/AndImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AndImpl.hpp
@@ -12,7 +12,7 @@
 #ifndef AIDGE_CPU_OPERATOR_ANDIMPL_H_
 #define AIDGE_CPU_OPERATOR_ANDIMPL_H_
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/And.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
@@ -21,29 +21,12 @@
 #include <vector>
 
 namespace Aidge {
-// compute kernel registry for forward and backward
-class AndImplForward_cpu
-    : public Registrable<AndImplForward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)>> {
-};
-class AndImplBackward_cpu
-    : public Registrable<AndImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)>> {
-};
+// Operator implementation entry point for the backend
+using AndImpl_cpu = OperatorImpl_cpu<And_Op,
+    void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)>;
 
-class AndImpl_cpu : public OperatorImpl {
-public:
-    AndImpl_cpu(const And_Op& op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<AndImpl_cpu> create(const And_Op& op) {
-        return std::make_unique<AndImpl_cpu>(op);
-    }
-
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
-    void forward() override;
-};
-
-namespace {
-static Registrar<And_Op> registrarAndImpl_cpu("cpu", Aidge::AndImpl_cpu::create);
-}
+// Implementation entry point registration to Operator
+REGISTRAR(And_Op, "cpu", Aidge::AndImpl_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_ANDIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/AndImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AndImpl_kernels.hpp
similarity index 61%
rename from include/aidge/backend/cpu/operator/AndImpl_forward_kernels.hpp
rename to include/aidge/backend/cpu/operator/AndImpl_kernels.hpp
index c537863c..197e829f 100644
--- a/include/aidge/backend/cpu/operator/AndImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AndImpl_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CPU_OPERATOR_ANDIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_ANDIMPL_FORWARD_KERNEL_H_
+#ifndef AIDGE_CPU_OPERATOR_ANDIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_ANDIMPL_KERNELS_H_
 
 #include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/operator/AndImpl.hpp"
@@ -45,20 +45,19 @@ void AndImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims,
     }
 }
 
-namespace {
-static Registrar<AndImplForward_cpu> registrarAndImplForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32, DataType::Float32},
-        Aidge::AndImpl_cpu_forward_kernel<float, float, float>);
-static Registrar<AndImplForward_cpu> registrarAndImplForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64, DataType::Float64},
-        Aidge::AndImpl_cpu_forward_kernel<double, double, double>);
-static Registrar<AndImplForward_cpu> registrarAndImplForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32, DataType::Int32},
-        Aidge::AndImpl_cpu_forward_kernel<std::int32_t, std::int32_t, std::int32_t>);
-static Registrar<AndImplForward_cpu> registrarAndImplForward_cpu_Int64(
-        {DataType::Int64, DataType::Int64, DataType::Int64},
-        Aidge::AndImpl_cpu_forward_kernel<std::int64_t, std::int64_t, std::int64_t>);
-}  // namespace
+// Kernels registration to implementation entry point
+REGISTRAR(AndImpl_cpu,
+    {DataType::Float32},
+    {ProdConso::inPlaceModel, Aidge::AndImpl_cpu_forward_kernel<float, float, float>, nullptr});
+REGISTRAR(AndImpl_cpu,
+    {DataType::Float64},
+    {ProdConso::inPlaceModel, Aidge::AndImpl_cpu_forward_kernel<double, double, double>, nullptr});
+REGISTRAR(AndImpl_cpu,
+    {DataType::Int32},
+    {ProdConso::inPlaceModel, Aidge::AndImpl_cpu_forward_kernel<std::int32_t, std::int32_t, std::int32_t>, nullptr});
+REGISTRAR(AndImpl_cpu,
+    {DataType::Int64},
+    {ProdConso::inPlaceModel, Aidge::AndImpl_cpu_forward_kernel<std::int64_t, std::int64_t, std::int64_t>, nullptr});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_ANDIMPL_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_ANDIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/ArgMaxImpl.hpp b/include/aidge/backend/cpu/operator/ArgMaxImpl.hpp
index 72b9cc40..b1a2d516 100644
--- a/include/aidge/backend/cpu/operator/ArgMaxImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ArgMaxImpl.hpp
@@ -17,44 +17,22 @@
 #include <tuple>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/ArgMax.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-class ArgMaxImplForward_cpu
-    : public Registrable<ArgMaxImplForward_cpu,
-                        std::tuple<DataType, DataType>,
-                        std::function<void(std::int32_t,
-                            DimSize_t,
-                            const std::vector<DimSize_t>&,
-                            const void *,
-                            void *)>> {};
-class ArgMaxImplBackward_cpu
-    : public Registrable<ArgMaxImplBackward_cpu,
-                        std::tuple<DataType, DataType>,
-                        std::function<void(std::int32_t,
-                            DimSize_t,
-                            const std::vector<DimSize_t>&,
-                            const void *,
-                            void *)>> {};
-
-class ArgMaxImpl_cpu : public OperatorImpl {
-   public:
-    ArgMaxImpl_cpu(const ArgMax_Op& op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<ArgMaxImpl_cpu> create(const ArgMax_Op &op) {
-        return std::make_unique<ArgMaxImpl_cpu>(op);
-    }
-
-   public:
-    void forward() override;
-};
-
-namespace {
-static Registrar<ArgMax_Op> registrarArgMaxImpl_cpu("cpu", Aidge::ArgMaxImpl_cpu::create);
-}  // namespace
+// Operator implementation entry point for the backend
+using ArgMaxImpl_cpu = OperatorImpl_cpu<ArgMax_Op,
+    void(std::int32_t,
+        DimSize_t,
+        const std::vector<DimSize_t>&,
+        const void *,
+        void *)>;
+
+// Implementation entry point registration to Operator
+REGISTRAR(ArgMax_Op, "cpu", Aidge::ArgMaxImpl_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_ARGMAXIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ArgMaxImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ArgMaxImpl_kernels.hpp
similarity index 77%
rename from include/aidge/backend/cpu/operator/ArgMaxImpl_forward_kernels.hpp
rename to include/aidge/backend/cpu/operator/ArgMaxImpl_kernels.hpp
index cea7d973..1bedec70 100644
--- a/include/aidge/backend/cpu/operator/ArgMaxImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ArgMaxImpl_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CPU_OPERATOR_ARGMAXIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_ARGMAXIMPL_FORWARD_KERNEL_H_
+#ifndef AIDGE_CPU_OPERATOR_ARGMAXIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_ARGMAXIMPL_KERNELS_H_
 
 #include <algorithm>   // std::for_each
 #include <cstddef>     // std::size_t
@@ -72,14 +72,16 @@ void ArgMaxImpl_cpu_forward_kernel(std::int32_t axis_,
 
 }
 
-namespace {
-static Registrar<ArgMaxImplForward_cpu> registrarArgMaxImplForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::ArgMaxImpl_cpu_forward_kernel<float, float>);
-static Registrar<ArgMaxImplForward_cpu> registrarArgMaxImplForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::ArgMaxImpl_cpu_forward_kernel<int, int>);
-static Registrar<ArgMaxImplForward_cpu> registrarArgMaxImplForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::ArgMaxImpl_cpu_forward_kernel<double, double>);
-}  // namespace
+// Kernels registration to implementation entry point
+REGISTRAR(ArgMaxImpl_cpu,
+    {DataType::Float32},
+    {ProdConso::defaultModel, Aidge::ArgMaxImpl_cpu_forward_kernel<float, float>, nullptr});
+REGISTRAR(ArgMaxImpl_cpu,
+    {DataType::Float64},
+    {ProdConso::defaultModel, Aidge::ArgMaxImpl_cpu_forward_kernel<double, double>, nullptr});
+REGISTRAR(ArgMaxImpl_cpu,
+    {DataType::Int32},
+    {ProdConso::defaultModel, Aidge::ArgMaxImpl_cpu_forward_kernel<std::int32_t, std::int32_t>, nullptr});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_ARGMAXIMPL_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_ARGMAXIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
index 7fbd7cd9..adea96ca 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
@@ -17,49 +17,24 @@
 #include <tuple>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
-// class AvgPooling_Op;
-
-// compute kernel registry for forward and backward
-class AvgPoolingImpl2DForward_cpu
-    : public Registrable<AvgPoolingImpl2DForward_cpu,
-                        std::tuple<DataType, DataType>,
-                        std::function<void(const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 4>&,
-                            const void *,
-                            void *)>> {};
-class AvgPoolingImpl2DBackward_cpu
-    : public Registrable<AvgPoolingImpl2DBackward_cpu,
-                        std::tuple<DataType, DataType>,
-                        std::function<void(const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 4>&,
-                            const void *,
-                            void *)>> {};
-
-class AvgPoolingImpl2D_cpu : public OperatorImpl {
-public:
-    AvgPoolingImpl2D_cpu(const AvgPooling_Op<2> &op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<AvgPoolingImpl2D_cpu> create(const AvgPooling_Op<2> &op) {
-        return std::make_unique<AvgPoolingImpl2D_cpu>(op);
-    }
-
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
-    void forward() override;
-};
-
-namespace {
-// add cpu backend to AvgPooling_Op<2> implementation registry
-static Registrar<AvgPooling_Op<2>> registrarAvgPoolingImpl2D_cpu("cpu", Aidge::AvgPoolingImpl2D_cpu::create);
-}  // namespace
+// Operator implementation entry point for the backend
+using AvgPooling2D_Op = AvgPooling_Op<2>;
+using AvgPoolingImpl2D_cpu = OperatorImpl_cpu<AvgPooling_Op<2>,
+    void(const std::array<DimSize_t, 2>&,
+        const std::array<DimSize_t, 2>&,
+        const std::array<DimSize_t, 4>&,
+        const void *,
+        void *)>;
+
+// Implementation entry point registration to Operator
+REGISTRAR(AvgPooling2D_Op, "cpu", Aidge::AvgPoolingImpl2D_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_AVGPOOLINGIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl_kernels.hpp
similarity index 85%
rename from include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
rename to include/aidge/backend/cpu/operator/AvgPoolingImpl_kernels.hpp
index c7d9f862..f6da9dcb 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CPU_OPERATOR_AVGPOOLINGIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_AVGPOOLINGIMPL_FORWARD_KERNEL_H_
+#ifndef AIDGE_CPU_OPERATOR_AVGPOOLINGIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_AVGPOOLINGIMPL_KERNELS_H_
 
 #include <array>
 #include <tuple>
@@ -101,17 +101,16 @@ void AvgPoolingImpl2D_cpu_forward_kernel(const std::array<DimSize_t, 2>& strideD
     }
 }
 
-namespace {
-static Registrar<AvgPoolingImpl2DForward_cpu> registrarAvgPoolingImpl2DForward_cpu_Float32(
-        std::tuple<DataType, DataType>({DataType::Float32, DataType::Float32}),
-        Aidge::AvgPoolingImpl2D_cpu_forward_kernel<float, float>);
-static Registrar<AvgPoolingImpl2DForward_cpu> registrarAvgPoolingImpl2DForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32},
-        Aidge::AvgPoolingImpl2D_cpu_forward_kernel<int, int>);
-static Registrar<AvgPoolingImpl2DForward_cpu> registrarAvgPoolingImpl2DForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64},
-        Aidge::AvgPoolingImpl2D_cpu_forward_kernel<double, double>);
-}  // namespace
+// Kernels registration to implementation entry point
+REGISTRAR(AvgPoolingImpl2D_cpu,
+    {{DataType::Float32, DataFormat::NCHW}, {DataType::Float32, DataFormat::NCHW}},
+    {ProdConso::inPlaceModel, Aidge::AvgPoolingImpl2D_cpu_forward_kernel<float, float>, nullptr});
+REGISTRAR(AvgPoolingImpl2D_cpu,
+    {{DataType::Int32, DataFormat::NCHW}, {DataType::Int32, DataFormat::NCHW}},
+    {ProdConso::inPlaceModel, Aidge::AvgPoolingImpl2D_cpu_forward_kernel<std::int32_t, std::int32_t>, nullptr});
+REGISTRAR(AvgPoolingImpl2D_cpu,
+    {{DataType::Float64, DataFormat::NCHW}, {DataType::Float64, DataFormat::NCHW}},
+    {ProdConso::inPlaceModel, Aidge::AvgPoolingImpl2D_cpu_forward_kernel<double, double>, nullptr});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_AVGPOOLINGIMPL_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_AVGPOOLINGIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
index 45860e33..36a100b2 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
@@ -17,58 +17,29 @@
 #include <tuple>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
-// class BatchNorm_Op;
-
-// compute kernel registry for forward and backward
-class BatchNormImpl2DForward_cpu
-    : public Registrable<BatchNormImpl2DForward_cpu,
-                         std::tuple<DataType, DataType, DataType>,
-                         std::function<void(float,
-                            float,
-                            const std::array<DimSize_t, 4> &,
-                            const void *,
-                            const void *,
-                            const void *,
-                            void *,
-                            void *,
-                            void *,
-                            const bool)>> {};
-class BatchNormImpl2DBackward_cpu
-    : public Registrable<BatchNormImpl2DBackward_cpu,
-                         std::tuple<DataType, DataType, DataType>,
-                         std::function<void(float,
-                            float,
-                            const std::array<DimSize_t, 4> &,
-                            const void *,
-                            const void *,
-                            const void *,
-                            void *,
-                            void *,
-                            void *)>> {};
-
-class BatchNormImpl2D_cpu : public OperatorImpl {
-public:
-    BatchNormImpl2D_cpu(const BatchNorm_Op<2> &op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<BatchNormImpl2D_cpu> create(const BatchNorm_Op<2> &op) {
-        return std::make_unique<BatchNormImpl2D_cpu>(op);
-    }
-
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
-    void forward() override;
-};
-
-namespace {
-// add cpu backend to BatchNorm_Op<2> implementation registry
-static Registrar<BatchNorm_Op<2>> registrarBatchNormImpl2D_cpu("cpu", Aidge::BatchNormImpl2D_cpu::create);
-}  // namespace
+// Operator implementation entry point for the backend
+using BatchNorm2D_Op = BatchNorm_Op<2>;
+using BatchNormImpl2D_cpu = OperatorImpl_cpu<BatchNorm_Op<2>,
+    void(float,
+        float,
+        const std::array<DimSize_t, 4> &,
+        const void *,
+        const void *,
+        const void *,
+        void *,
+        void *,
+        void *,
+        const bool)>;
+
+// Implementation entry point registration to Operator
+REGISTRAR(BatchNorm2D_Op, "cpu", Aidge::BatchNormImpl2D_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_BATCHNORMIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl_kernels.hpp
similarity index 90%
rename from include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp
rename to include/aidge/backend/cpu/operator/BatchNormImpl_kernels.hpp
index 19f232a7..ec71e3b8 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CPU_OPERATOR_BATCHNORMIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_BATCHNORMIMPL_FORWARD_KERNEL_H_
+#ifndef AIDGE_CPU_OPERATOR_BATCHNORMIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_BATCHNORMIMPL_KERNELS_H_
 
 #include "aidge/utils/Registrar.hpp"
 
@@ -96,15 +96,10 @@ void BatchNormImpl2D_cpu_forward_kernel(float epsilon, float momentum, const std
     }
 }
 
-
-
-
-
-namespace {
-static Registrar<BatchNormImpl2DForward_cpu> registrarBatchNormImpl2DForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32, DataType::Float32},
-        Aidge::BatchNormImpl2D_cpu_forward_kernel<float, float, float>);
-}  // namespace
+// Kernels registration to implementation entry point
+REGISTRAR(BatchNormImpl2D_cpu,
+    {{DataType::Float32, DataFormat::NCHW}, {DataType::Float32, DataFormat::NCHW}},
+    {ProdConso::inPlaceModel, Aidge::BatchNormImpl2D_cpu_forward_kernel<float, float, float>, nullptr});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_BATCHNORMIMPL_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_BATCHNORMIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
index 5e59d502..5b985acc 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
@@ -17,85 +17,39 @@
 #include <tuple>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
-// class ConvDepthWise_Op;
-// compute kernel registry for forward and backward
-class ConvDepthWiseImpl1DForward_cpu
-    : public Registrable<ConvDepthWiseImpl1DForward_cpu,
-                         std::tuple<DataType, DataType, DataType, DataType>,
-                         std::function<void(const std::array<DimSize_t, 1>&,
-                            const std::array<DimSize_t, 1>&,
-                            const std::array<DimSize_t, 1>&,
-                            const std::array<DimSize_t, 3>&,
-                            const void *,
-                            const void *,
-                            const void *,
-                            void *)>> {};
-
-class ConvDepthWiseImpl1D_cpu : public OperatorImpl {
-public:
-    ConvDepthWiseImpl1D_cpu(const ConvDepthWise_Op<1> &op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<ConvDepthWiseImpl1D_cpu> create(const ConvDepthWise_Op<1> &op) {
-        return std::make_unique<ConvDepthWiseImpl1D_cpu>(op);
-    }
-
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
-    void forward() override;
-};
-
-namespace {
-// add cpu backend to ConvDepthWise_Op<1> implementation registry
-static Registrar<ConvDepthWise_Op<1>> registrarConvDepthWiseImpl1D_cpu("cpu", Aidge::ConvDepthWiseImpl1D_cpu::create);
-}  // namespace
-
-// compute kernel registry for forward and backward
-class ConvDepthWiseImpl2DForward_cpu
-    : public Registrable<ConvDepthWiseImpl2DForward_cpu,
-                         std::tuple<DataType, DataType, DataType, DataType>,
-                         std::function<void(const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 4> &,
-                            const void *,
-                            const void *,
-                            const void *,
-                            void *)>> {};
-class ConvDepthWiseImpl2DBackward_cpu
-    : public Registrable<ConvDepthWiseImpl2DBackward_cpu,
-                         std::tuple<DataType, DataType, DataType, DataType>,
-                         std::function<void(const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 2>&,
-                            bool,
-                            const std::array<DimSize_t, 4> &,
-                            const void *,
-                            const void *,
-                            const void *,
-                            void *)>> {};
-
-class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
-public:
-    ConvDepthWiseImpl2D_cpu(const ConvDepthWise_Op<2> &op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<ConvDepthWiseImpl2D_cpu> create(const ConvDepthWise_Op<2> &op) {
-        return std::make_unique<ConvDepthWiseImpl2D_cpu>(op);
-    }
-
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
-    void forward() override;
-};
-
-namespace {
-// add cpu backend to ConvDepthWise_Op<2> implementation registry
-static Registrar<ConvDepthWise_Op<2>> registrarConvDepthWiseImpl2D_cpu("cpu", Aidge::ConvDepthWiseImpl2D_cpu::create);
-}  // namespace
+// Operator implementation entry point for the backend
+using ConvDepthWise1D_Op = ConvDepthWise_Op<1>;
+using ConvDepthWiseImpl1D_cpu = OperatorImpl_cpu<ConvDepthWise_Op<1>,
+    void(const std::array<DimSize_t, 1>&,
+        const std::array<DimSize_t, 1>&,
+        const std::array<DimSize_t, 1>&,
+        const std::array<DimSize_t, 3>&,
+        const void *,
+        const void *,
+        const void *,
+        void *)>;
+
+using ConvDepthWise2D_Op = ConvDepthWise_Op<2>;
+using ConvDepthWiseImpl2D_cpu = OperatorImpl_cpu<ConvDepthWise_Op<2>,
+    void(const std::array<DimSize_t, 2>&,
+        const std::array<DimSize_t, 2>&,
+        const std::array<DimSize_t, 2>&,
+        const std::array<DimSize_t, 4> &,
+        const void *,
+        const void *,
+        const void *,
+        void *)>;
+
+// Implementation entry point registration to Operator
+REGISTRAR(ConvDepthWise1D_Op, "cpu", Aidge::ConvDepthWiseImpl1D_cpu::create);
+REGISTRAR(ConvDepthWise2D_Op, "cpu", Aidge::ConvDepthWiseImpl2D_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_CONVDEPTHWISEIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_kernels.hpp
similarity index 83%
rename from include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
rename to include/aidge/backend/cpu/operator/ConvDepthWiseImpl_kernels.hpp
index a02aa672..ff9bb148 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CPU_OPERATOR_CONVDEPTHWISEIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_CONVDEPTHWISEIMPL_FORWARD_KERNEL_H_
+#ifndef AIDGE_CPU_OPERATOR_CONVDEPTHWISEIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_CONVDEPTHWISEIMPL_KERNELS_H_
 
 #include <algorithm>
 #include <array>
@@ -86,17 +86,16 @@ void ConvDepthWiseImpl1D_cpu_forward_kernel(const std::array<DimSize_t, 1>& stri
     }
 }
 
-namespace {
-static Registrar<ConvDepthWiseImpl1DForward_cpu> registrarConvDepthWiseImpl1DForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
-        Aidge::ConvDepthWiseImpl1D_cpu_forward_kernel<float, float, float, float>);
-static Registrar<ConvDepthWiseImpl1DForward_cpu> registrarConvDepthWiseImpl1DForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32},
-        Aidge::ConvDepthWiseImpl1D_cpu_forward_kernel<std::int32_t, std::int32_t, std::int32_t, std::int32_t>);
-static Registrar<ConvDepthWiseImpl1DForward_cpu> registrarConvDepthWiseImpl1DForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
-        Aidge::ConvDepthWiseImpl1D_cpu_forward_kernel<double, double, double, double>);
-}  // namespace
+// Kernels registration to implementation entry point
+REGISTRAR(ConvDepthWiseImpl1D_cpu,
+    {{DataType::Any, DataFormat::NCHW}, {DataType::Float32, DataFormat::NCHW}},
+    {ProdConso::inPlaceModel, Aidge::ConvDepthWiseImpl1D_cpu_forward_kernel<float, float, float, float>, nullptr});
+REGISTRAR(ConvDepthWiseImpl1D_cpu,
+    {{DataType::Any, DataFormat::NCHW}, {DataType::Int32, DataFormat::NCHW}},
+    {ProdConso::inPlaceModel, Aidge::ConvDepthWiseImpl1D_cpu_forward_kernel<std::int32_t, std::int32_t, std::int32_t, std::int32_t>, nullptr});
+REGISTRAR(ConvDepthWiseImpl1D_cpu,
+    {{DataType::Any, DataFormat::NCHW}, {DataType::Float64, DataFormat::NCHW}},
+    {ProdConso::inPlaceModel, Aidge::ConvDepthWiseImpl1D_cpu_forward_kernel<double, double, double, double>, nullptr});
 
 
 /**
@@ -187,17 +186,16 @@ void ConvDepthWiseImpl2D_cpu_forward_kernel(const std::array<DimSize_t, 2>& stri
     }
 }
 
-namespace {
-static Registrar<ConvDepthWiseImpl2DForward_cpu> registrarConvDepthWiseImpl2DForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
-        Aidge::ConvDepthWiseImpl2D_cpu_forward_kernel<float, float, float, float>);
-static Registrar<ConvDepthWiseImpl2DForward_cpu> registrarConvDepthWiseImpl2DForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32},
-        Aidge::ConvDepthWiseImpl2D_cpu_forward_kernel<std::int32_t, std::int32_t, std::int32_t, std::int32_t>);
-static Registrar<ConvDepthWiseImpl2DForward_cpu> registrarConvDepthWiseImpl2DForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
-        Aidge::ConvDepthWiseImpl2D_cpu_forward_kernel<double, double, double, double>);
-}  // namespace
+// Kernels registration to implementation entry point
+REGISTRAR(ConvDepthWiseImpl2D_cpu,
+    {{DataType::Any, DataFormat::NCHW}, {DataType::Float32, DataFormat::NCHW}},
+    {ProdConso::inPlaceModel, Aidge::ConvDepthWiseImpl2D_cpu_forward_kernel<float, float, float, float>, nullptr});
+REGISTRAR(ConvDepthWiseImpl2D_cpu,
+    {{DataType::Any, DataFormat::NCHW}, {DataType::Int32, DataFormat::NCHW}},
+    {ProdConso::inPlaceModel, Aidge::ConvDepthWiseImpl2D_cpu_forward_kernel<std::int32_t, std::int32_t, std::int32_t, std::int32_t>, nullptr});
+REGISTRAR(ConvDepthWiseImpl2D_cpu,
+    {{DataType::Any, DataFormat::NCHW}, {DataType::Float64, DataFormat::NCHW}},
+    {ProdConso::inPlaceModel, Aidge::ConvDepthWiseImpl2D_cpu_forward_kernel<double, double, double, double>, nullptr});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_CONVDEPTHWISEIMPL_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_CONVDEPTHWISEIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp
index 740d7265..c06d0912 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp
@@ -28,26 +28,26 @@ namespace Aidge {
 using Conv1D_Op = Conv_Op<1>;
 using ConvImpl1D_cpu = OperatorImpl_cpu<Conv_Op<1>,
     void(const std::array<DimSize_t, 1>&,
-                            const std::array<DimSize_t, 1>&,
-                            const std::array<DimSize_t, 1>&,
-                            const std::array<DimSize_t, 3> &,
-                            DimSize_t,
-                            const void *,
-                            const void *,
-                            const void *,
-                            void *)>;
+        const std::array<DimSize_t, 1>&,
+        const std::array<DimSize_t, 1>&,
+        const std::array<DimSize_t, 3> &,
+        DimSize_t,
+        const void *,
+        const void *,
+        const void *,
+        void *)>;
 
 using Conv2D_Op = Conv_Op<2>;
 using ConvImpl2D_cpu = OperatorImpl_cpu<Conv_Op<2>,
     void(const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 4> &,
-                            DimSize_t,
-                            const void *,
-                            const void *,
-                            const void *,
-                            void *)>;
+        const std::array<DimSize_t, 2>&,
+        const std::array<DimSize_t, 2>&,
+        const std::array<DimSize_t, 4> &,
+        DimSize_t,
+        const void *,
+        const void *,
+        const void *,
+        void *)>;
 
 // Implementation entry point registration to Operator
 REGISTRAR(Conv1D_Op, "cpu", Aidge::ConvImpl1D_cpu::create);
diff --git a/include/aidge/backend/cpu/operator/ConvImpl_kernels.hpp b/include/aidge/backend/cpu/operator/ConvImpl_kernels.hpp
index 1b37f74d..7fbce5ed 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl_kernels.hpp
@@ -92,6 +92,7 @@ void ConvImpl1D_cpu_forward_kernel(const std::array<DimSize_t, 1>& strideDims,
     }
 }
 
+// Kernels registration to implementation entry point
 REGISTRAR(ConvImpl1D_cpu,
     {{DataType::Any, DataFormat::NCHW}, {DataType::Float32, DataFormat::NCHW}},
     {ProdConso::inPlaceModel, Aidge::ConvImpl1D_cpu_forward_kernel<float, float, float, float>, nullptr});
@@ -197,6 +198,7 @@ void ConvImpl2D_cpu_forward_kernel(const std::array<DimSize_t, 2>& strideDims,
     }
 }
 
+// Kernels registration to implementation entry point
 REGISTRAR(ConvImpl2D_cpu,
     {{DataType::Any, DataFormat::NCHW}, {DataType::Float32, DataFormat::NCHW}},
     {ProdConso::inPlaceModel, Aidge::ConvImpl2D_cpu_forward_kernel<float, float, float, float>, nullptr});
diff --git a/include/aidge/backend/cpu/operator/DivImpl.hpp b/include/aidge/backend/cpu/operator/DivImpl.hpp
index c969e19e..40c1b678 100644
--- a/include/aidge/backend/cpu/operator/DivImpl.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl.hpp
@@ -16,38 +16,18 @@
 #include <tuple>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/Div.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
+using DivImpl_cpu = OperatorImpl_cpu<Div_Op,
+    void(const std::size_t, const std::size_t, const std::size_t, const void*, const void*, void*)>;
 
-// compute kernel registry for forward and backward
-class DivImplForward_cpu
-    // : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)>> {
-    : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::size_t, const std::size_t, const std::size_t, const void*, const void*,void*)>> {
-};
-class DivImplBackward_cpu
-    : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)>> {
-};
-
-class DivImpl_cpu : public OperatorImpl {
-public:
-    DivImpl_cpu(const Div_Op& op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<DivImpl_cpu> create(const Div_Op& op) {
-        return std::make_unique<DivImpl_cpu>(op);
-    }
-
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
-
-    void forward() override final;
-};
-
-namespace {
-static Registrar<Div_Op> registrarDivImpl_cpu("cpu", Aidge::DivImpl_cpu::create);
-}
+// Implementation entry point registration to Operator
+REGISTRAR(Div_Op, "cpu", Aidge::DivImpl_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_DIVIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/DivImpl_kernels.hpp
similarity index 77%
rename from include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
rename to include/aidge/backend/cpu/operator/DivImpl_kernels.hpp
index 74db1128..ed6e55a7 100644
--- a/include/aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CPU_OPERATOR_DIVIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_DIVIMPL_FORWARD_KERNEL_H_
+#ifndef AIDGE_CPU_OPERATOR_DIVIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_DIVIMPL_KERNELS_H_
 
 #include <numeric>     // std::accumulate
 #include <cstddef>     // std::size_t
@@ -69,19 +69,16 @@ constexpr void DivImpl_cpu_forward_kernel(const std::size_t input1size_,
     }
 }
 
-
-
-namespace {
-static Registrar<DivImplForward_cpu> registrarDivImplForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32, DataType::Float32},
-        Aidge::DivImpl_cpu_forward_kernel<float, float, float>);
-static Registrar<DivImplForward_cpu> registrarDivImplForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32, DataType::Int32},
-        Aidge::DivImpl_cpu_forward_kernel<std::int32_t, std::int32_t, std::int32_t>);
-static Registrar<DivImplForward_cpu> registrarDivImplForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64, DataType::Float64},
-        Aidge::DivImpl_cpu_forward_kernel<double, double, double>);
-}  // namespace
+// Kernels registration to implementation entry point
+REGISTRAR(DivImpl_cpu,
+    {DataType::Float32},
+    {ProdConso::inPlaceModel, Aidge::DivImpl_cpu_forward_kernel<float, float, float>, nullptr});
+REGISTRAR(DivImpl_cpu,
+    {DataType::Float64},
+    {ProdConso::inPlaceModel, Aidge::DivImpl_cpu_forward_kernel<double, double, double>, nullptr});
+REGISTRAR(DivImpl_cpu,
+    {DataType::Int32},
+    {ProdConso::inPlaceModel, Aidge::DivImpl_cpu_forward_kernel<std::int32_t, std::int32_t, std::int32_t>, nullptr});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_DIVIMPL_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_DIVIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/ErfImpl.hpp b/include/aidge/backend/cpu/operator/ErfImpl.hpp
index 11aed23d..3d283560 100644
--- a/include/aidge/backend/cpu/operator/ErfImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ErfImpl.hpp
@@ -12,7 +12,7 @@
 #ifndef AIDGE_CPU_OPERATOR_ERFIMPL_H_
 #define AIDGE_CPU_OPERATOR_ERFIMPL_H_
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/Erf.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
@@ -20,31 +20,12 @@
 #include <vector>
 
 namespace Aidge {
-// class Erf_Op;
+// Operator implementation entry point for the backend
+using ErfImpl_cpu = OperatorImpl_cpu<Erf_Op,
+    void(const std::size_t, const void*, void*)>;
 
-// compute kernel registry for forward and backward
-class ErfImplForward_cpu
-    : public Registrable<ErfImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
-};
-class ErfImplBackward_cpu
-    : public Registrable<ErfImplBackward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
-};
-
-class ErfImpl_cpu : public OperatorImpl {
-public:
-    ErfImpl_cpu(const Erf_Op& op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<ErfImpl_cpu> create(const Erf_Op& op) {
-        return std::make_unique<ErfImpl_cpu>(op);
-    }
-
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
-    void forward() override;
-};
-
-namespace {
-static Registrar<Erf_Op> registrarErfImpl_cpu("cpu", Aidge::ErfImpl_cpu::create);
-}
+// Implementation entry point registration to Operator
+REGISTRAR(Erf_Op, "cpu", Aidge::ErfImpl_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_ERFIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ErfImpl_kernels.hpp
similarity index 57%
rename from include/aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp
rename to include/aidge/backend/cpu/operator/ErfImpl_kernels.hpp
index bb92401b..02041f55 100644
--- a/include/aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ErfImpl_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CPU_OPERATOR_ERFIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_ERFIMPL_FORWARD_KERNEL_H_
+#ifndef AIDGE_CPU_OPERATOR_ERFIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_ERFIMPL_KERNELS_H_
 
 #include <cmath>
 
@@ -32,14 +32,16 @@ void ErfImpl_cpu_forward_kernel(std::size_t inputLenght,
     }
 }
 
-namespace {
-static Registrar<ErfImplForward_cpu> registrarErfImplForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::ErfImpl_cpu_forward_kernel<float, float>);
-static Registrar<ErfImplForward_cpu> registrarErfImplForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::ErfImpl_cpu_forward_kernel<int, int>);
-static Registrar<ErfImplForward_cpu> registrarErfImplForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::ErfImpl_cpu_forward_kernel<double, double>);
-}  // namespace
+// Kernels registration to implementation entry point
+REGISTRAR(ErfImpl_cpu,
+    {DataType::Float32},
+    {ProdConso::inPlaceModel, Aidge::ErfImpl_cpu_forward_kernel<float, float>, nullptr});
+REGISTRAR(ErfImpl_cpu,
+    {DataType::Float64},
+    {ProdConso::inPlaceModel, Aidge::ErfImpl_cpu_forward_kernel<double, double>, nullptr});
+REGISTRAR(ErfImpl_cpu,
+    {DataType::Int32},
+    {ProdConso::inPlaceModel, Aidge::ErfImpl_cpu_forward_kernel<std::int32_t, std::int32_t>, nullptr});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_ERFIMPL_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_ERFIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/FCImpl.hpp b/include/aidge/backend/cpu/operator/FCImpl.hpp
index fefd88b4..e82352d9 100644
--- a/include/aidge/backend/cpu/operator/FCImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl.hpp
@@ -16,57 +16,33 @@
 #include <memory>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-// class FC_Op;
-
-// compute kernel registry for forward and backward
-class FCImplForward_cpu : public Registrable<FCImplForward_cpu,
-                                             std::tuple<DataType,
-                                                        DataType,
-                                                        DataType,
-                                                        DataType>,
-                                             std::function<void(const DimSize_t,
-                                                const DimSize_t,
-                                                const DimSize_t,
-                                                const void *,
-                                                const void *,
-                                                const void *,
-                                                void *)>> {};
-class FCImplBackward_cpu : public Registrable<FCImplBackward_cpu,
-                                              std::tuple<DataType,
-                                                         DataType,
-                                                         DataType,
-                                                         DataType>,
-                                              std::function<void(const DimSize_t,
-                                                const DimSize_t,
-                                                const DimSize_t,
-                                                const void *,
-                                                const void *,
-                                                const void *,
-                                                void *,
-                                                void *,
-                                                void *)>> {};
-
-class FCImpl_cpu : public OperatorImpl {
-public:
-    FCImpl_cpu(const FC_Op &op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<FCImpl_cpu> create(const FC_Op &op) {
-        return std::make_unique<FCImpl_cpu>(op);
-    }
-
-    void forward() override final;
-    void backward() override final;
-};
-
-namespace {
-static Registrar<FC_Op> registrarFCImpl_cpu("cpu", Aidge::FCImpl_cpu::create);
-}
+// Operator implementation entry point for the backend
+using FCImpl_cpu = OperatorImpl_cpu<FC_Op,
+    void(const DimSize_t,
+        const DimSize_t,
+        const DimSize_t,
+        const void *,
+        const void *,
+        const void *,
+        void *),
+    void(const DimSize_t,
+        const DimSize_t,
+        const DimSize_t,
+        const void *,
+        const void *,
+        const void *,
+        void *,
+        void *,
+        void *)>;
+
+// Implementation entry point registration to Operator
+REGISTRAR(FC_Op, "cpu", Aidge::FCImpl_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_FCIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp
deleted file mode 100644
index c93a44d9..00000000
--- a/include/aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CPU_OPERATOR_FCIMPL_BACKWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_FCIMPL_BACKWARD_KERNEL_H_
-
-#include "aidge/utils/Registrar.hpp"
-#include <algorithm>
-
-#include "aidge/backend/cpu/operator/FCImpl.hpp"
-
-namespace Aidge {
-template <class I, class O, class W, class B>
-void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
-                                const DimSize_t inputFeatureSize,
-                                const DimSize_t outputFeatureSize,
-                                const void* input_,
-                                const void* originalInput_,
-                                const void* weight_,
-                                void* output_,
-                                void* weightGrad_,
-                                void* biasesGrad_)
-{
-    // FIXME: missing FC attributes as arguments
-    const I* input  = static_cast<const I*>(input_);
-    const I* originalInput  = static_cast<const I*>(originalInput_);
-    const W* weight = static_cast<const W*>(weight_);
-    O* output       = static_cast<O*>(output_);
-    W* weightGrad   = static_cast<W*>(weightGrad_);
-    B* biasesGrad   = static_cast<B*>(biasesGrad_);
-
-
-    // bias grad
-    if (biasesGrad == nullptr) { // no bias
-        std::fill(biasesGrad, biasesGrad + outputFeatureSize, B(0));
-    } else {
-        for (std::size_t o = 0; o < outputFeatureSize; ++o) { // nb outputs
-            B sum{0};
-            for (std::size_t b = 0; b < batchSize; ++b) {
-                sum += input[b*outputFeatureSize + o];
-            }
-            biasesGrad[o] = sum;
-        }
-    }
-
-    // weight grad
-    for (std::size_t o = 0; o < outputFeatureSize; ++o) {
-        for (std::size_t c = 0; c < inputFeatureSize; ++c) {
-            W sum{0};
-            for (std::size_t b = 0; b < batchSize; ++b) {
-                sum += originalInput[b*inputFeatureSize + c]*input[b*outputFeatureSize + o];
-            }
-            weightGrad[o*inputFeatureSize + c] = sum;
-        }
-    }
-
-    // input grad
-    for (std::size_t b = 0; b < batchSize; ++b) {
-        for (std::size_t c = 0; c < inputFeatureSize; ++c) {
-            O sum{0};
-            for (std::size_t o = 0; o < outputFeatureSize; ++o) {
-                sum += weight[o*inputFeatureSize + c] * input[b*outputFeatureSize + o];
-            }
-            output[b*inputFeatureSize + c] = sum;
-        }
-    }
-}
-
-
-namespace {
-static Registrar<FCImplBackward_cpu> registrarFCImpl2DBackward_cpu_Float32(
-        {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
-        Aidge::FCImpl_cpu_backward_kernel<float, float, float, float>);
-static Registrar<FCImplBackward_cpu> registrarFCImpl2DBackward_cpu_Int32(
-        {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32},
-        Aidge::FCImpl_cpu_backward_kernel<int, int, int, int>);
-static Registrar<FCImplBackward_cpu> registrarFCImpl2DBackward_cpu_Float64(
-        {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
-        Aidge::FCImpl_cpu_backward_kernel<double, double, double, double>);
-}  // namespace
-
-}  // namespace Aidge
-
-#endif /* AIDGE_CPU_OPERATOR_FCIMPL_BACKWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/FCImpl_kernels.hpp
similarity index 63%
rename from include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp
rename to include/aidge/backend/cpu/operator/FCImpl_kernels.hpp
index caeacd1b..c7380a93 100644
--- a/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CPU_OPERATOR_FCIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_FCIMPL_FORWARD_KERNEL_H_
+#ifndef AIDGE_CPU_OPERATOR_FCIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_FCIMPL_KERNELS_H_
 
 #include <algorithm>
 
@@ -115,19 +115,72 @@ void FCImpl_cpu_forward_kernel(const DimSize_t batchSize,
     }
 }
 
+template <class I, class O, class W, class B>
+void FCImpl_cpu_backward_kernel(const DimSize_t batchSize,
+                                const DimSize_t inputFeatureSize,
+                                const DimSize_t outputFeatureSize,
+                                const void* input_,
+                                const void* originalInput_,
+                                const void* weight_,
+                                void* output_,
+                                void* weightGrad_,
+                                void* biasesGrad_)
+{
+    // FIXME: missing FC attributes as arguments
+    const I* input  = static_cast<const I*>(input_);
+    const I* originalInput  = static_cast<const I*>(originalInput_);
+    const W* weight = static_cast<const W*>(weight_);
+    O* output       = static_cast<O*>(output_);
+    W* weightGrad   = static_cast<W*>(weightGrad_);
+    B* biasesGrad   = static_cast<B*>(biasesGrad_);
+
+
+    // bias grad: dL/dBias[o] = sum over batch of the incoming gradient.
+    // biasesGrad may be nullptr when the FC has no bias; in that case skip
+    // entirely — writing through the null pointer would be undefined behavior.
+    if (biasesGrad != nullptr) {
+        for (std::size_t o = 0; o < outputFeatureSize; ++o) { // nb outputs
+            B sum{0};
+            for (std::size_t b = 0; b < batchSize; ++b) {
+                sum += input[b*outputFeatureSize + o];
+            }
+            biasesGrad[o] = sum;
+        }
+    }
 
-namespace {
-static Registrar<FCImplForward_cpu> registrarFCImpl2DForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32, DataType::Float32, DataType::Float32},
-        Aidge::FCImpl_cpu_forward_kernel<float, float, float, float>);
-static Registrar<FCImplForward_cpu> registrarFCImpl2DForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32, DataType::Int32, DataType::Int32},
-        Aidge::FCImpl_cpu_forward_kernel<int, int, int, int>);
-static Registrar<FCImplForward_cpu> registrarFCImpl2DForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64, DataType::Float64, DataType::Float64},
-        Aidge::FCImpl_cpu_forward_kernel<double, double, double, double>);
-}  // namespace
+    // weight grad
+    for (std::size_t o = 0; o < outputFeatureSize; ++o) {
+        for (std::size_t c = 0; c < inputFeatureSize; ++c) {
+            W sum{0};
+            for (std::size_t b = 0; b < batchSize; ++b) {
+                sum += originalInput[b*inputFeatureSize + c]*input[b*outputFeatureSize + o];
+            }
+            weightGrad[o*inputFeatureSize + c] = sum;
+        }
+    }
+
+    // input grad
+    for (std::size_t b = 0; b < batchSize; ++b) {
+        for (std::size_t c = 0; c < inputFeatureSize; ++c) {
+            O sum{0};
+            for (std::size_t o = 0; o < outputFeatureSize; ++o) {
+                sum += weight[o*inputFeatureSize + c] * input[b*outputFeatureSize + o];
+            }
+            output[b*inputFeatureSize + c] = sum;
+        }
+    }
+}
 
+// Kernels registration to implementation entry point
+REGISTRAR(FCImpl_cpu,
+    {{DataType::Any}, {DataType::Float32}},
+    {ProdConso::defaultModel, Aidge::FCImpl_cpu_forward_kernel<float, float, float, float>, Aidge::FCImpl_cpu_backward_kernel<float, float, float, float>});
+REGISTRAR(FCImpl_cpu,
+    {{DataType::Any}, {DataType::Float64}},
+    {ProdConso::defaultModel, Aidge::FCImpl_cpu_forward_kernel<double, double, double, double>, Aidge::FCImpl_cpu_backward_kernel<double, double, double, double>});
+REGISTRAR(FCImpl_cpu,
+    {{DataType::Any}, {DataType::Int32}},
+    {ProdConso::defaultModel, Aidge::FCImpl_cpu_forward_kernel<int, int, int, int>, Aidge::FCImpl_cpu_backward_kernel<int, int, int, int>});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_FCIMPL_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_FCIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/FoldImpl.hpp b/include/aidge/backend/cpu/operator/FoldImpl.hpp
index a5b33225..94ddbdcb 100644
--- a/include/aidge/backend/cpu/operator/FoldImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FoldImpl.hpp
@@ -17,39 +17,26 @@
 #include <tuple>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/Fold.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
-class FoldImpl2DForward_cpu
-    : public Registrable<FoldImpl2DForward_cpu,
-                         std::tuple<DataType, DataType>,
-                         std::function<void(const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 2>&,
-                            const std::vector<DimSize_t> &,
-                            const void *,
-                            void *)>> {};
-
-class FoldImpl2D_cpu : public OperatorImpl {
-public:
-    FoldImpl2D_cpu(const Fold_Op<2> &op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<FoldImpl2D_cpu> create(const Fold_Op<2> &op) {
-        return std::make_unique<FoldImpl2D_cpu>(op);
-    }
-
-    void forward() override;
-};
-
-namespace {
-// add cpu backend to Fold_Op<2> implementation registry
-static Registrar<Fold_Op<2>> registrarFoldImpl2D_cpu("cpu", Aidge::FoldImpl2D_cpu::create);
-}  // namespace
+// Operator implementation entry point for the backend
+using Fold2D_Op = Fold_Op<2>;
+using FoldImpl2D_cpu = OperatorImpl_cpu<Fold_Op<2>,
+    void(const std::array<DimSize_t, 2>&,
+        const std::array<DimSize_t, 2>&,
+        const std::array<DimSize_t, 2>&,
+        const std::array<DimSize_t, 2>&,
+        const std::vector<DimSize_t> &,
+        const void *,
+        void *)>;
+
+// Implementation entry point registration to Operator
+REGISTRAR(Fold2D_Op, "cpu", Aidge::FoldImpl2D_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_FOLDIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/FoldImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/FoldImpl_kernels.hpp
similarity index 80%
rename from include/aidge/backend/cpu/operator/FoldImpl_forward_kernels.hpp
rename to include/aidge/backend/cpu/operator/FoldImpl_kernels.hpp
index 3dba2319..0e2643ac 100644
--- a/include/aidge/backend/cpu/operator/FoldImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/FoldImpl_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CPU_OPERATOR_FOLDIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_FOLDIMPL_FORWARD_KERNEL_H_
+#ifndef AIDGE_CPU_OPERATOR_FOLDIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_FOLDIMPL_KERNELS_H_
 
 #include "aidge/utils/Registrar.hpp"
 
@@ -71,17 +71,16 @@ void FoldImpl2D_cpu_forward_kernel(const std::array<DimSize_t, 2>& outputDims,
     }
 }
 
-namespace {
-static Registrar<FoldImpl2DForward_cpu> registrarFoldImpl2DForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32},
-        Aidge::FoldImpl2D_cpu_forward_kernel<float, float>);
-static Registrar<FoldImpl2DForward_cpu> registrarFoldImpl2DForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32},
-        Aidge::FoldImpl2D_cpu_forward_kernel<int, int>);
-static Registrar<FoldImpl2DForward_cpu> registrarFoldImpl2DForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64},
-        Aidge::FoldImpl2D_cpu_forward_kernel<double, double>);
-}  // namespace
+// Kernels registration to implementation entry point
+REGISTRAR(FoldImpl2D_cpu,
+    {DataType::Float32},
+    {ProdConso::defaultModel, Aidge::FoldImpl2D_cpu_forward_kernel<float, float>, nullptr});
+REGISTRAR(FoldImpl2D_cpu,
+    {DataType::Float64},
+    {ProdConso::defaultModel, Aidge::FoldImpl2D_cpu_forward_kernel<double, double>, nullptr});
+REGISTRAR(FoldImpl2D_cpu,
+    {DataType::Int32},
+    {ProdConso::defaultModel, Aidge::FoldImpl2D_cpu_forward_kernel<int, int>, nullptr});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_FOLDIMPL_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_FOLDIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp b/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp
index 6ce10711..4e04b1a5 100644
--- a/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp
@@ -15,41 +15,18 @@
 #include <memory>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/GlobalAveragePooling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-// class GlobalAveragePooling_Op;
+// Operator implementation entry point for the backend
+using GlobalAveragePoolingImpl_cpu = OperatorImpl_cpu<GlobalAveragePooling_Op,
+    void(const std::vector<DimSize_t> &, const void *, void *)>;
 
-class GlobalAveragePoolingImplForward_cpu
-    : public Registrable<
-          GlobalAveragePoolingImplForward_cpu, std::tuple<DataType, DataType>,
-          std::function<void(const std::vector<DimSize_t> &, const void *, void *)>> {};
-
-class GlobalAveragePoolingImplBackward_cpu
-    : public Registrable<
-          GlobalAveragePoolingImplBackward_cpu, std::tuple<DataType, DataType>,
-          std::function<void(const std::vector<DimSize_t> &, const void *, void *)>> {};
-
-class GlobalAveragePoolingImpl_cpu : public OperatorImpl {
-public:
-  GlobalAveragePoolingImpl_cpu(const GlobalAveragePooling_Op &op)
-      : OperatorImpl(op, "cpu") {}
-
-  static std::unique_ptr<GlobalAveragePoolingImpl_cpu>
-  create(const GlobalAveragePooling_Op &op) {
-    return std::make_unique<GlobalAveragePoolingImpl_cpu>(op);
-  }
-
-  void forward() override;
-};
-
-namespace {
-static Registrar<GlobalAveragePooling_Op> registrarGlobalAveragePoolingImpl_cpu(
-    "cpu", Aidge::GlobalAveragePoolingImpl_cpu::create);
-}
+// Implementation entry point registration to Operator
+REGISTRAR(GlobalAveragePooling_Op, "cpu", Aidge::GlobalAveragePoolingImpl_cpu::create);
 } // namespace Aidge
 
 #endif /* _AIDGE_CPU_OPERATOR_GLOBALAVERAGEPOOLINGIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl_kernels.hpp
similarity index 68%
rename from include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl_forward_kernels.hpp
rename to include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl_kernels.hpp
index 81f10975..8042ca8e 100644
--- a/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CPU_OPERATOR_GLOBALAVERAGEPOOLINGIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_GLOBALAVERAGEPOOLINGIMPL_FORWARD_KERNEL_H_
+#ifndef AIDGE_CPU_OPERATOR_GLOBALAVERAGEPOOLINGIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_GLOBALAVERAGEPOOLINGIMPL_KERNELS_H_
 
 #include <cstddef>
 #include <functional>  // std::multiplies
@@ -59,21 +59,16 @@ void GlobalAveragePoolingImpl_cpu_forward_kernel(
   }
 }
 
-// Then we add the Registrar declaration for different input/output types
-namespace {
-static Registrar<GlobalAveragePoolingImplForward_cpu>
-    registrarGlobalAveragePoolingImplForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32},
-        Aidge::GlobalAveragePoolingImpl_cpu_forward_kernel<float, float>);
-static Registrar<GlobalAveragePoolingImplForward_cpu>
-    registrarGlobalAveragePoolingImplForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32},
-        Aidge::GlobalAveragePoolingImpl_cpu_forward_kernel<int, int>);
-static Registrar<GlobalAveragePoolingImplForward_cpu>
-    registrarGlobalAveragePoolingImplForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64},
-        Aidge::GlobalAveragePoolingImpl_cpu_forward_kernel<double, double>);
-} // namespace
+// Kernels registration to implementation entry point
+REGISTRAR(GlobalAveragePoolingImpl_cpu,
+    {DataType::Float32},
+    {ProdConso::defaultModel, Aidge::GlobalAveragePoolingImpl_cpu_forward_kernel<float, float>, nullptr});
+REGISTRAR(GlobalAveragePoolingImpl_cpu,
+    {DataType::Float64},
+    {ProdConso::defaultModel, Aidge::GlobalAveragePoolingImpl_cpu_forward_kernel<double, double>, nullptr});
+REGISTRAR(GlobalAveragePoolingImpl_cpu,
+    {DataType::Int32},
+    {ProdConso::defaultModel, Aidge::GlobalAveragePoolingImpl_cpu_forward_kernel<int, int>, nullptr});
 } // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_GLOBALAVERAGEPOOLINGIMPL_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_GLOBALAVERAGEPOOLINGIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
index a3e95540..1e8c1a14 100644
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
@@ -16,47 +16,26 @@
 #include <tuple>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/LeakyReLU.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
-// compute kernel registry for forward and backward
-class LeakyReLUImplForward_cpu
-    : public Registrable<LeakyReLUImplForward_cpu,
-                        std::tuple<DataType, DataType>,
-                        std::function<void(const float,
-                            std::size_t,
-                            const void*,
-                            void*)>> {};
-class LeakyReLUImplBackward_cpu
-    : public Registrable<LeakyReLUImplBackward_cpu,
-                        std::tuple<DataType, DataType>,
-                        std::function<void(const float,
-                            std::size_t,
-                            const void*,
-                            void*)>> {};
-
-class LeakyReLUImpl_cpu : public OperatorImpl {
-public:
-    LeakyReLUImpl_cpu(const LeakyReLU_Op& op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<LeakyReLUImpl_cpu> create(const LeakyReLU_Op& op) {
-        return std::make_unique<LeakyReLUImpl_cpu>(op);
-    }
-
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
-
-    void forward() override final;
-
-    void backward() override final;
-};
-
-namespace {
-static Registrar<LeakyReLU_Op> registrarLeakyReLUImpl_cpu("cpu", Aidge::LeakyReLUImpl_cpu::create);
-}
+// Operator implementation entry point for the backend
+using LeakyReLUImpl_cpu = OperatorImpl_cpu<LeakyReLU_Op,
+    void(const float,
+        std::size_t,
+        const void*,
+        void*),
+    void(const float,
+        std::size_t,
+        const void*,
+        void*)>;
+
+// Implementation entry point registration to Operator
+REGISTRAR(LeakyReLU_Op, "cpu", Aidge::LeakyReLUImpl_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp
deleted file mode 100644
index e308d940..00000000
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp
+++ /dev/null
@@ -1,45 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_BACKWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_BACKWARD_KERNEL_H_
-
-#include "aidge/utils/Registrar.hpp"
-
-#include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
-
-namespace Aidge {
-template <class I, class O>
-void LeakyReLUImpl_cpu_backward_kernel(const float negativeSlope_,
-                                     std::size_t inputLenght,
-                                     const void* input_,
-                                     void* output_) {
-
-    const I* input = static_cast<const I*>(input_);
-    O* output = static_cast<O*>(output_);
-    const I negativeSlope = static_cast<const I>(negativeSlope_);
-
-    for (std::size_t i = 0; i < inputLenght; ++i) {
-        output[i] = (input[i] > 0) ? input[i] : negativeSlope*input[i];
-    }
-}
-
-namespace {
-static Registrar<LeakyReLUImplBackward_cpu> registrarLeakyReLUImplBackward_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::LeakyReLUImpl_cpu_backward_kernel<float, float>);
-static Registrar<LeakyReLUImplBackward_cpu> registrarLeakyReLUImplBackward_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::LeakyReLUImpl_cpu_backward_kernel<int, int>);
-static Registrar<LeakyReLUImplBackward_cpu> registrarLeakyReLUImplBackward_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::LeakyReLUImpl_cpu_backward_kernel<double, double>);
-}  // namespace
-}  // namespace Aidge
-
-#endif /* AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_BACKWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp
deleted file mode 100644
index 450d0bf4..00000000
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp
+++ /dev/null
@@ -1,45 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_FORWARD_KERNEL_H_
-
-#include "aidge/utils/Registrar.hpp"
-
-#include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
-
-namespace Aidge {
-template <class I, class O>
-void LeakyReLUImpl_cpu_forward_kernel(const float negativeSlope_,
-                                     std::size_t inputLenght,
-                                     const void* input_,
-                                     void* output_) {
-
-    const I* input = static_cast<const I*>(input_);
-    O* output = static_cast<O*>(output_);
-    const I negativeSlope = static_cast<const I>(negativeSlope_);
-
-    for (std::size_t i = 0; i < inputLenght; ++i) {
-        output[i] = (input[i] >= 0) ? input[i] : input[i] * negativeSlope;
-    }
-}
-
-namespace {
-static Registrar<LeakyReLUImplForward_cpu> registrarLeakyReLUImplForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::LeakyReLUImpl_cpu_forward_kernel<float, float>);
-static Registrar<LeakyReLUImplForward_cpu> registrarLeakyReLUImplForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::LeakyReLUImpl_cpu_forward_kernel<int, int>);
-static Registrar<LeakyReLUImplForward_cpu> registrarLeakyReLUImplForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::LeakyReLUImpl_cpu_forward_kernel<double, double>);
-}  // namespace
-}  // namespace Aidge
-
-#endif /* AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl_kernels.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl_kernels.hpp
new file mode 100644
index 00000000..1f0f9fee
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl_kernels.hpp
@@ -0,0 +1,62 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_KERNELS_H_
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void LeakyReLUImpl_cpu_forward_kernel(const float negativeSlope_,
+                                     std::size_t inputLenght,
+                                     const void* input_,
+                                     void* output_) { // LeakyReLU forward: f(x) = x if x >= 0, else negativeSlope * x
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+    const I negativeSlope = static_cast<const I>(negativeSlope_); // slope cast once to the input type
+
+    for (std::size_t i = 0; i < inputLenght; ++i) {
+        output[i] = (input[i] >= 0) ? input[i] : input[i] * negativeSlope;
+    }
+}
+
+template <class I, class O>
+void LeakyReLUImpl_cpu_backward_kernel(const float negativeSlope_,
+                                     std::size_t inputLenght,
+                                     const void* input_,
+                                     void* output_) { // LeakyReLU backward pass (registered as the backward kernel below)
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+    const I negativeSlope = static_cast<const I>(negativeSlope_);
+
+    for (std::size_t i = 0; i < inputLenght; ++i) {
+        output[i] = (input[i] > 0) ? input[i] : negativeSlope*input[i]; // NOTE(review): sign test uses input_ itself; if input_ is the upstream gradient, dLeakyReLU/dx should branch on the forward-pass x instead — confirm against the caller
+    }
+}
+
+// Kernels registration to implementation entry point
+REGISTRAR(LeakyReLUImpl_cpu,
+    {DataType::Float32},
+    {ProdConso::inPlaceModel, Aidge::LeakyReLUImpl_cpu_forward_kernel<float, float>, Aidge::LeakyReLUImpl_cpu_backward_kernel<float, float>});
+REGISTRAR(LeakyReLUImpl_cpu,
+    {DataType::Float64},
+    {ProdConso::inPlaceModel, Aidge::LeakyReLUImpl_cpu_forward_kernel<double, double>, Aidge::LeakyReLUImpl_cpu_backward_kernel<double, double>});
+REGISTRAR(LeakyReLUImpl_cpu,
+    {DataType::Int32},
+    {ProdConso::inPlaceModel, Aidge::LeakyReLUImpl_cpu_forward_kernel<int, int>, Aidge::LeakyReLUImpl_cpu_backward_kernel<int, int>});
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_LEAKYRELUIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/LnImpl.hpp b/include/aidge/backend/cpu/operator/LnImpl.hpp
index 0c7b9709..d48a7ae4 100755
--- a/include/aidge/backend/cpu/operator/LnImpl.hpp
+++ b/include/aidge/backend/cpu/operator/LnImpl.hpp
@@ -12,7 +12,7 @@
 #ifndef AIDGE_CPU_OPERATOR_LNIMPL_H_
 #define AIDGE_CPU_OPERATOR_LNIMPL_H_
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/Ln.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
@@ -21,34 +21,13 @@
 #include <vector>
 
 namespace Aidge {
-// class Ln_Op;
+// Operator implementation entry point for the backend
+using LnImpl_cpu = OperatorImpl_cpu<Ln_Op,
+    void(const std::size_t, const void*, void*),
+    void(const std::size_t, const void*, const void*, void*)>;
 
-// compute kernel registry for forward and backward
-class LnImplForward_cpu
-    : public Registrable<LnImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
-};
-class LnImplBackward_cpu
-    : public Registrable<LnImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::size_t, const void*, const void*, void*)>> {
-};
-
-class LnImpl_cpu : public OperatorImpl {
-public:
-    LnImpl_cpu(const Ln_Op& op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<LnImpl_cpu> create(const Ln_Op& op) {
-        return std::make_unique<LnImpl_cpu>(op);
-    }
-
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
-	
-    void forward() override final;
-
-    void backward() override final;
-};
-
-namespace {
-static Registrar<Ln_Op> registrarLnImpl_cpu("cpu", Aidge::LnImpl_cpu::create);
-}
+// Implementation entry point registration to Operator
+REGISTRAR(Ln_Op, "cpu", Aidge::LnImpl_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_LNIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/LnImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/LnImpl_forward_kernels.hpp
deleted file mode 100755
index ebb97551..00000000
--- a/include/aidge/backend/cpu/operator/LnImpl_forward_kernels.hpp
+++ /dev/null
@@ -1,47 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CPU_OPERATOR_LNIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_LNIMPL_FORWARD_KERNEL_H_
-
-#include "aidge/utils/Registrar.hpp"
-
-#include "aidge/backend/cpu/operator/LnImpl.hpp"
-
-namespace Aidge {
-template <class I, class O>
-void LnImpl_cpu_forward_kernel(std::size_t inputLenght,
-                               const void* input_,
-                               void* output_) {
-
-    const I* input = static_cast<const I*>(input_);
-    O* output = static_cast<O*>(output_);
-	const float eps = 1.0e-20f;
-
-//#pragma omp parallel for if (inputLenght > 1024)
-    for (std::size_t i = 0; i < inputLenght; ++i) {
-		if (input[i] > I(eps)) {
-			output[i] = std::log(input[i]);
-		} else {
-			output[i] = std::log(I(eps));
-		}
-    }
-}
-
-namespace {
-static Registrar<LnImplForward_cpu> registrarLnImplForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::LnImpl_cpu_forward_kernel<float, float>);
-static Registrar<LnImplForward_cpu> registrarLnImplForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::LnImpl_cpu_forward_kernel<double, double>);
-}  // namespace
-}  // namespace Aidge
-
-#endif /* AIDGE_CPU_OPERATOR_LNIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/LnImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/LnImpl_kernels.hpp
similarity index 50%
rename from include/aidge/backend/cpu/operator/LnImpl_backward_kernels.hpp
rename to include/aidge/backend/cpu/operator/LnImpl_kernels.hpp
index 5fb82e35..b30b05bb 100755
--- a/include/aidge/backend/cpu/operator/LnImpl_backward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/LnImpl_kernels.hpp
@@ -1,50 +1,67 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CPU_OPERATOR_LNIMPL_BACKWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_LNIMPL_BACKWARD_KERNEL_H_
-
-#include <cstddef>  // std::size_t
-
-#include "aidge/backend/cpu/operator/LnImpl.hpp"
-#include "aidge/utils/Registrar.hpp"
-
-namespace Aidge {
-template <class I, class GI, class GO>
-void LnImpl_cpu_backward_kernel(const std::size_t inputLenght,
-                                const void* input_, const void* grad_output_,
-	                            void* grad_input_) {
-						 
-    const I* input = static_cast<const I*>(input_);
-    const GO* grad_output = static_cast<const GO*>(grad_output_);
-    GI* grad_input = static_cast<GI*>(grad_input_);
-	const float eps = 1.0e-20f;
-	
-    for (std::size_t i = 0; i < inputLenght; ++i) {
-		if (input[i] > I(eps)) {
-			grad_input[i] = grad_output[i] / input[i];
-		} else {
-			grad_input[i] = GI(0);
-		}
-    }
-}
-
-namespace {
-static Registrar<LnImplBackward_cpu> registrarLnImplBackward_cpu_Float32(
-    {DataType::Float32, DataType::Float32, DataType::Float32},
-    Aidge::LnImpl_cpu_backward_kernel<float, float, float>);	
-static Registrar<LnImplBackward_cpu> registrarLnImplBackward_cpu_Float64(
-    {DataType::Float64, DataType::Float64, DataType::Float64},
-    Aidge::LnImpl_cpu_backward_kernel<double, double, double>);
-}  // namespace
-}  // namespace Aidge
-
-#endif /* AIDGE_CPU_OPERATOR_LNIMPL_BACKWARD_KERNEL_H_ */
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_LNIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_LNIMPL_KERNELS_H_
+
+#include "aidge/utils/Registrar.hpp"
+
+#include "aidge/backend/cpu/operator/LnImpl.hpp"
+
+namespace Aidge {
+template <class I, class O>
+void LnImpl_cpu_forward_kernel(std::size_t inputLenght,
+                               const void* input_,
+                               void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+	const float eps = 1.0e-20f;
+
+//#pragma omp parallel for if (inputLenght > 1024)
+    for (std::size_t i = 0; i < inputLenght; ++i) {
+		if (input[i] > I(eps)) {
+			output[i] = std::log(input[i]);
+		} else {
+			output[i] = std::log(I(eps));
+		}
+    }
+}
+
+template <class I, class GI, class GO>
+void LnImpl_cpu_backward_kernel(const std::size_t inputLenght,
+                                const void* input_, const void* grad_output_,
+	                            void* grad_input_) {
+						 
+    const I* input = static_cast<const I*>(input_);
+    const GO* grad_output = static_cast<const GO*>(grad_output_);
+    GI* grad_input = static_cast<GI*>(grad_input_);
+	const float eps = 1.0e-20f;
+	
+    for (std::size_t i = 0; i < inputLenght; ++i) {
+		if (input[i] > I(eps)) {
+			grad_input[i] = grad_output[i] / input[i];
+		} else {
+			grad_input[i] = GI(0);
+		}
+    }
+}
+
+// Kernels registration to implementation entry point
+REGISTRAR(LnImpl_cpu,
+    {DataType::Float32},
+    {ProdConso::inPlaceModel, Aidge::LnImpl_cpu_forward_kernel<float, float>, Aidge::LnImpl_cpu_backward_kernel<float, float, float>});
+REGISTRAR(LnImpl_cpu,
+    {DataType::Float64},
+    {ProdConso::inPlaceModel, Aidge::LnImpl_cpu_forward_kernel<double, double>, Aidge::LnImpl_cpu_backward_kernel<double, double, double>});
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_LNIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/MatMulImpl.hpp b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
index 957002de..c07aa5f8 100644
--- a/include/aidge/backend/cpu/operator/MatMulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
@@ -16,37 +16,20 @@
 #include <memory>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/MatMul.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
+// Operator implementation entry point for the backend
+using MatMulImpl_cpu = OperatorImpl_cpu<MatMul_Op,
+    void(const std::size_t, const std::size_t, const std::size_t,
+                              const void *, const void *, void *)>;
 
-class MatMulImplForward_cpu
-    : public Registrable<MatMulImplForward_cpu, std::tuple<DataType, DataType>,
-                         std::function<void(const std::size_t, const std::size_t, const std::size_t,
-                              const void *, const void *, void *)>> {};
-class MatMulImplBackward_cpu
-    : public Registrable<MatMulImplBackward_cpu, std::tuple<DataType, DataType>,
-                         std::function<void(const std::vector<DimSize_t>&, const std::vector<DimSize_t>&,
-                              const void *, const void *, void *)>> {};
-
-class MatMulImpl_cpu : public OperatorImpl {
-public:
-    MatMulImpl_cpu(const MatMul_Op &op): OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<MatMulImpl_cpu> create(const MatMul_Op &op) {
-        return std::make_unique<MatMulImpl_cpu>(op);
-    }
-
-    void forward() override;
-};
-
-namespace {
-static Registrar<MatMul_Op> registrarMatMulImpl_cpu("cpu", Aidge::MatMulImpl_cpu::create);
-}
+// Implementation entry point registration to Operator
+REGISTRAR(MatMul_Op, "cpu", Aidge::MatMulImpl_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_MATMULIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MatMulImpl_kernels.hpp
similarity index 62%
rename from include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp
rename to include/aidge/backend/cpu/operator/MatMulImpl_kernels.hpp
index 5045580f..cf159629 100644
--- a/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/MatMulImpl_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CPU_OPERATOR_MATMULIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_MATMULIMPL_FORWARD_KERNEL_H_
+#ifndef AIDGE_CPU_OPERATOR_MATMULIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_MATMULIMPL_KERNELS_H_
 
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
 
@@ -35,18 +35,16 @@ void MatMulImpl_cpu_forward_kernel(const std::size_t n, const std::size_t k, con
     }
 }
 
-namespace {
-static Registrar<MatMulImplForward_cpu> registrarMatMulImpl2DForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32},
-        Aidge::MatMulImpl_cpu_forward_kernel<float, float>);
-static Registrar<MatMulImplForward_cpu> registrarMatMulImpl2DForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32},
-        Aidge::MatMulImpl_cpu_forward_kernel<int, int>);
-static Registrar<MatMulImplForward_cpu> registrarMatMulImpl2DForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64},
-        Aidge::MatMulImpl_cpu_forward_kernel<double, double>);
-}  // namespace
-
+// Kernels registration to implementation entry point
+REGISTRAR(MatMulImpl_cpu,
+    {DataType::Float32},
+    {ProdConso::defaultModel, Aidge::MatMulImpl_cpu_forward_kernel<float, float>, nullptr});
+REGISTRAR(MatMulImpl_cpu,
+    {DataType::Float64},
+    {ProdConso::defaultModel, Aidge::MatMulImpl_cpu_forward_kernel<double, double>, nullptr});
+REGISTRAR(MatMulImpl_cpu,
+    {DataType::Int32},
+    {ProdConso::defaultModel, Aidge::MatMulImpl_cpu_forward_kernel<int, int>, nullptr});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_MATMULIMPL_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_MATMULIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
index c561da6a..68cc3621 100644
--- a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
@@ -17,51 +17,25 @@
 #include <tuple>
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
-// class MaxPooling_Op;
-
-// compute kernel registry for forward and backward
-class MaxPoolingImpl2DForward_cpu
-    : public Registrable<MaxPoolingImpl2DForward_cpu,
-                        std::tuple<DataType, DataType>,
-                        std::function<void(const std::array<DimSize_t, 2>&,
-                            const std::array<DimSize_t, 2>&,
-                            const bool,
-                            const std::array<DimSize_t, 4> &,
-                            const void *,
-                            void *)>> {};
-class MaxPoolingImpl2DBackward_cpu
-    : public Registrable<MaxPoolingImpl2DBackward_cpu,
-                        std::tuple<DataType, DataType>,
-                        std::function<void(const std::array<DimSize_t, 2>&,
+// Operator implementation entry point for the backend
+using MaxPooling2D_Op = MaxPooling_Op<2>;
+using MaxPoolingImpl2D_cpu = OperatorImpl_cpu<MaxPooling_Op<2>,
+    void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const bool,
                             const std::array<DimSize_t, 4> &,
                             const void *,
-                            void *)>> {};
-
-class MaxPoolingImpl2D_cpu : public OperatorImpl {
-public:
-    MaxPoolingImpl2D_cpu(const MaxPooling_Op<2> &op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<MaxPoolingImpl2D_cpu> create(const MaxPooling_Op<2> &op) {
-        return std::make_unique<MaxPoolingImpl2D_cpu>(op);
-    }
-
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
-    void forward() override;
-};
+                            void *)>;
 
-namespace {
-// add cpu backend to MaxPooling_Op<2> implementation registry
-static Registrar<MaxPooling_Op<2>> registrarMaxPoolingImpl2D_cpu("cpu", Aidge::MaxPoolingImpl2D_cpu::create);
-}  // namespace
+// Implementation entry point registration to Operator
+REGISTRAR(MaxPooling2D_Op, "cpu", Aidge::MaxPoolingImpl2D_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_MaxPOOLINGIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl_kernels.hpp
similarity index 91%
rename from include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp
rename to include/aidge/backend/cpu/operator/MaxPoolingImpl_kernels.hpp
index 79a7bd15..e9d0e90e 100644
--- a/include/aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CPU_OPERATOR_MaxPOOLINGIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_MaxPOOLINGIMPL_FORWARD_KERNEL_H_
+#ifndef AIDGE_CPU_OPERATOR_MaxPOOLINGIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_MaxPOOLINGIMPL_KERNELS_H_
 
 #include <array>
 #include <cmath>
@@ -199,17 +199,16 @@ void N2D2::PoolCell_Frame_Kernels::forwardMax(const T* alpha,
 
 */
 
-namespace {
-static Registrar<MaxPoolingImpl2DForward_cpu> registrarMaxPoolingImpl2DForward_cpu_Float32(
-        std::tuple<DataType, DataType>({DataType::Float32, DataType::Float32}),
-        Aidge::MaxPoolingImpl2D_cpu_forward_kernel<float, float>);
-static Registrar<MaxPoolingImpl2DForward_cpu> registrarMaxPoolingImpl2DForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32},
-        Aidge::MaxPoolingImpl2D_cpu_forward_kernel<int, int>);
-static Registrar<MaxPoolingImpl2DForward_cpu> registrarMaxPoolingImpl2DForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64},
-        Aidge::MaxPoolingImpl2D_cpu_forward_kernel<double, double>);
-}  // namespace
+// Kernels registration to implementation entry point
+REGISTRAR(MaxPoolingImpl2D_cpu,
+    {DataType::Float32},
+    {ProdConso::inPlaceModel, Aidge::MaxPoolingImpl2D_cpu_forward_kernel<float, float>, nullptr});
+REGISTRAR(MaxPoolingImpl2D_cpu,
+    {DataType::Float64},
+    {ProdConso::inPlaceModel, Aidge::MaxPoolingImpl2D_cpu_forward_kernel<double, double>, nullptr});
+REGISTRAR(MaxPoolingImpl2D_cpu,
+    {DataType::Int32},
+    {ProdConso::inPlaceModel, Aidge::MaxPoolingImpl2D_cpu_forward_kernel<int, int>, nullptr});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_MaxPOOLINGIMPL_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_MaxPOOLINGIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/MulImpl.hpp b/include/aidge/backend/cpu/operator/MulImpl.hpp
index 6c4cd64c..f70de8cc 100644
--- a/include/aidge/backend/cpu/operator/MulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MulImpl.hpp
@@ -12,7 +12,7 @@
 #ifndef AIDGE_CPU_OPERATOR_MULIMPL_H_
 #define AIDGE_CPU_OPERATOR_MULIMPL_H_
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/Mul.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
@@ -21,31 +21,12 @@
 #include <vector>
 
 namespace Aidge {
-// class Mul_Op;
+// Operator implementation entry point for the backend
+using MulImpl_cpu = OperatorImpl_cpu<Mul_Op,
+    void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)>;
 
-// compute kernel registry for forward and backward
-class MulImplForward_cpu
-    : public Registrable<MulImplForward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)>> {
-};
-class MulImplBackward_cpu
-    : public Registrable<MulImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)>> {
-};
-
-class MulImpl_cpu : public OperatorImpl {
-public:
-    MulImpl_cpu(const Mul_Op& op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<MulImpl_cpu> create(const Mul_Op& op) {
-        return std::make_unique<MulImpl_cpu>(op);
-    }
-
-    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
-    void forward() override;
-};
-
-namespace {
-static Registrar<Mul_Op> registrarMulImpl_cpu("cpu", Aidge::MulImpl_cpu::create);
-}
+// Implementation entry point registration to Operator
+REGISTRAR(Mul_Op, "cpu", Aidge::MulImpl_cpu::create);
 }  // namespace Aidge
 
 #endif /* AIDGE_CPU_OPERATOR_MULIMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MulImpl_kernels.hpp
similarity index 61%
rename from include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp
rename to include/aidge/backend/cpu/operator/MulImpl_kernels.hpp
index c44199ba..c1a976c5 100644
--- a/include/aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/MulImpl_kernels.hpp
@@ -9,8 +9,8 @@
  *
  ********************************************************************************/
 
-#ifndef AIDGE_CPU_OPERATOR_MULIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_MULIMPL_FORWARD_KERNEL_H_
+#ifndef AIDGE_CPU_OPERATOR_MULIMPL_KERNELS_H_
+#define AIDGE_CPU_OPERATOR_MULIMPL_KERNELS_H_
 
 #include "aidge/utils/Registrar.hpp"
 
@@ -48,20 +48,19 @@ void MulImpl_cpu_forward_kernel(const std::vector<std::size_t>& input1Dims,
     }
 }
 
-namespace {
-static Registrar<MulImplForward_cpu> registrarMulImplForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32, DataType::Float32},
-        Aidge::MulImpl_cpu_forward_kernel<float, float, float>);
-static Registrar<MulImplForward_cpu> registrarMulImplForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64, DataType::Float64},
-        Aidge::MulImpl_cpu_forward_kernel<double, double, double>);
-static Registrar<MulImplForward_cpu> registrarMulImplForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32, DataType::Int32},
-        Aidge::MulImpl_cpu_forward_kernel<std::int32_t, std::int32_t, std::int32_t>);
-static Registrar<MulImplForward_cpu> registrarMulImplForward_cpu_Int64(
-        {DataType::Int64, DataType::Int64, DataType::Int64},
-        Aidge::MulImpl_cpu_forward_kernel<std::int64_t, std::int64_t, std::int64_t>);
-}  // namespace
+// Kernels registration to implementation entry point
+REGISTRAR(MulImpl_cpu,
+    {DataType::Float32},
+    {ProdConso::inPlaceModel, Aidge::MulImpl_cpu_forward_kernel<float, float, float>, nullptr});
+REGISTRAR(MulImpl_cpu,
+    {DataType::Float64},
+    {ProdConso::inPlaceModel, Aidge::MulImpl_cpu_forward_kernel<double, double, double>, nullptr});
+REGISTRAR(MulImpl_cpu,
+    {DataType::Int32},
+    {ProdConso::inPlaceModel, Aidge::MulImpl_cpu_forward_kernel<std::int32_t, std::int32_t, std::int32_t>, nullptr});
+REGISTRAR(MulImpl_cpu,
+    {DataType::Int64},
+    {ProdConso::inPlaceModel, Aidge::MulImpl_cpu_forward_kernel<std::int64_t, std::int64_t, std::int64_t>, nullptr});
 }  // namespace Aidge
 
-#endif /* AIDGE_CPU_OPERATOR_MULIMPL_FORWARD_KERNEL_H_ */
+#endif /* AIDGE_CPU_OPERATOR_MULIMPL_KERNELS_H_ */
diff --git a/include/aidge/backend/cpu/operator/PadImpl.hpp b/include/aidge/backend/cpu/operator/PadImpl.hpp
index c296aebd..bc0bd8ca 100644
--- a/include/aidge/backend/cpu/operator/PadImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PadImpl.hpp
@@ -45,10 +45,6 @@ using PadImpl1D_cpu = OperatorImpl_cpu<Pad_Op<1>,
                             const void *,
                             void *)>;
 
-// Implementation entry point registration to Operator
-REGISTRAR(Pad1D_Op, "cpu", Aidge::PadImpl1D_cpu::create);
-
-// Operator implementation entry point for the backend
 using Pad2D_Op = Pad_Op<2>;
 using PadImpl2D_cpu = OperatorImpl_cpu<Pad_Op<2>,
     void(const std::array<DimSize_t, 4>&,
@@ -59,6 +55,7 @@ using PadImpl2D_cpu = OperatorImpl_cpu<Pad_Op<2>,
                             void *)>;
 
 // Implementation entry point registration to Operator
+REGISTRAR(Pad1D_Op, "cpu", Aidge::PadImpl1D_cpu::create);
 REGISTRAR(Pad2D_Op, "cpu", Aidge::PadImpl2D_cpu::create);
 }  // namespace Aidge
 
diff --git a/src/operator/AbsImpl.cpp b/src/operator/AbsImpl.cpp
index 589de2d8..130d6cf7 100644
--- a/src/operator/AbsImpl.cpp
+++ b/src/operator/AbsImpl.cpp
@@ -14,24 +14,27 @@
 #include <memory>
 #include <vector>
 
-#include "aidge/backend/cpu/operator/AbsImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/AbsImpl_kernels.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Abs.hpp"
 #include "aidge/utils/Types.h"
 
+template <>
 void Aidge::AbsImpl_cpu::forward() {
     const Abs_Op& op = static_cast<const Abs_Op&>(mOp);
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<AbsImplForward_cpu>::create({
-                            op.getInput(0)->dataType(),
-                            op.getOutput(0)->dataType()
-                        });
+    const auto impl = Registrar<AbsImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    kernelFunc(
+    impl.forward(
         op.getInput(0)->size(),
         op.getInput(0)->getImpl()->rawPtr(),
         op.getOutput(0)->getImpl()->rawPtr()
     );
 }
+
+template <>
+void Aidge::AbsImpl_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for Abs_Op on backend cpu");
+}
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index 7074546f..457a0b17 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -16,64 +16,57 @@
 #include <vector>
 
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include "aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/AddImpl_kernels.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
+template <>
 void  Aidge::AddImpl_cpu::forward() {
-    const auto& opTensor = static_cast<const OperatorTensor&>(mOp);
-    AIDGE_ASSERT(opTensor.getInput(0)->hasImpl(), "cannot run Add forward because the 0-th input has no implementation.");
-    assert(opTensor.getInput(0) && "missing input in Add operator");
-    DataType datatypeFirstInput = opTensor.getInput(0)->dataType();
-    for (IOIndex_t i = 1; i < opTensor.nbInputs(); ++i) {
-        AIDGE_ASSERT(opTensor.getInput(i)->hasImpl(), "cannot run Add forward because the {}-th input has no implementation.", i);
-        assert(opTensor.getInput(i) && "missing input in Add operator");
-        assert(opTensor.getInput(i)->dataType() == datatypeFirstInput);
+    const Add_Op& op = static_cast<const Add_Op&>(mOp);
+    // Check inputs
+    AIDGE_ASSERT(op.getInput(0), "missing input in Add operator");
+    AIDGE_ASSERT(op.getInput(0)->hasImpl(), "cannot run Add forward because the 0-th input has no implementation.");
+    DataType datatypeFirstInput = op.getInput(0)->dataType();
+    for (IOIndex_t i = 1; i < op.nbInputs(); ++i) {
+        AIDGE_ASSERT(op.getInput(i), "missing input in Add operator");
+        AIDGE_ASSERT(op.getInput(i)->hasImpl(), "cannot run Add forward because the {}-th input has no implementation.", i);
+        AIDGE_ASSERT(op.getInput(i)->dataType() == datatypeFirstInput, "Cannot add inputs with two differents data type.");
     }
 
     // Find the correct kernel type
-    const auto outputDataType = opTensor.getOutput(0)->dataType();
-    const Registrar<AddImplForward_cpu>::registrar_key registrarKey = {
-        datatypeFirstInput,
-        outputDataType};
-
-    Registrar<AddImplForward_cpu>::registrar_type kernelFunc;
-    if (Registrar<AddImplForward_cpu>::exists(registrarKey)) {
-        // One exists with the right inputs/output types
-        kernelFunc = Registrar<AddImplForward_cpu>::create(registrarKey);
-    }
-    else {
-        // Otherwise, fallback to the kernel with all types matching output type
-        kernelFunc = Registrar<AddImplForward_cpu>::create({
-            outputDataType, outputDataType});
-    }
+    const auto impl = Registrar<AddImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Convert input data (no overhead if not needed!)
     // TODO: right now, if needed, memory will be allocated/deallocated at each
     // call to forward(). We might put the following shared_ptr as members of
     // this class to avoid that.
-    const std::size_t nbDims = opTensor.getOutput(0)->nbDims();
+    const std::size_t nbDims = op.getOutput(0)->nbDims();
     std::vector<std::vector<std::size_t>> inputsDims;
     std::vector<const void*> opInputs;
-    std::vector<std::shared_ptr<Tensor>> inputsFallback(opTensor.nbInputs());
-    for (IOIndex_t i = 0; i < opTensor.nbInputs(); ++i) {
+    std::vector<std::shared_ptr<Tensor>> inputsFallback(op.nbInputs());
+    for (IOIndex_t i = 0; i < op.nbInputs(); ++i) {
         std::vector<std::size_t> inputDims(nbDims, 1);
-        auto dims = opTensor.getInput(i)->dims();
+        auto dims = op.getInput(i)->dims();
 		for(std::size_t j=dims.size()-1; j+1>0; --j)
 		{
 			std::size_t idx = nbDims - (dims.size()-j);
 			inputDims[idx] = dims[j];
 		}
         inputsDims.push_back(inputDims);
-        const auto& input = opTensor.getInput(i)->refCastFrom(inputsFallback[i], *opTensor.getOutput(0));
+        const auto& input = op.getInput(i)->refCastFrom(inputsFallback[i], *op.getOutput(0));
         opInputs.push_back(input.getImpl()->rawPtr());
     }
 
-    kernelFunc(opInputs,
+    impl.forward(opInputs,
                inputsDims,
-               opTensor.getOutput(0)->size(),
-               opTensor.getOutput(0)->dims(),
-               getCPUPtr(opTensor.getRawOutput(0)));
+               op.getOutput(0)->size(),
+               op.getOutput(0)->dims(),
+               getCPUPtr(op.getRawOutput(0)));
+}
+
+template <>
+void Aidge::AddImpl_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for Add_Op on backend cpu");
 }
diff --git a/src/operator/AndImpl.cpp b/src/operator/AndImpl.cpp
index 78a273d0..2e0f5976 100644
--- a/src/operator/AndImpl.cpp
+++ b/src/operator/AndImpl.cpp
@@ -21,25 +21,29 @@
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/AndImpl.hpp"
-#include "aidge/backend/cpu/operator/AndImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/AndImpl_kernels.hpp"
 
+template <>
 void Aidge::AndImpl_cpu::forward() {
-    // Find the correct kernel type
-    auto kernelFunc = Registrar<AndImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
-
     const std::vector<std::size_t> inputDims0 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
                                                                    std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims());
     const std::vector<std::size_t> inputDims1 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
                                                                    std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims());
 
+
+    // Find the correct kernel type
+    const auto impl = Registrar<AndImpl_cpu>::create(getBestMatch(getRequiredSpec()));
+
     // Call kernel
-    kernelFunc(inputDims0,
+    impl.forward(inputDims0,
         inputDims1,
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawInput(1)),
         getCPUPtr(mOp.getRawOutput(0)));
 }
+
+template <>
+void Aidge::AndImpl_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for And_Op on backend cpu");
+}
diff --git a/src/operator/ArgMaxImpl.cpp b/src/operator/ArgMaxImpl.cpp
index eda3c0b2..b8fb85a7 100644
--- a/src/operator/ArgMaxImpl.cpp
+++ b/src/operator/ArgMaxImpl.cpp
@@ -16,19 +16,24 @@
 
 #include "aidge/utils/Types.h"
 #include "aidge/operator/ArgMax.hpp"
-#include "aidge/backend/cpu/operator/ArgMaxImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/ArgMaxImpl_kernels.hpp"
 
+template <>
 void Aidge::ArgMaxImpl_cpu::forward() {
     const ArgMax_Op& op_ = dynamic_cast<const ArgMax_Op&>(mOp);
+
     // Find the correct kernel type
-    auto kernelFunc = Registrar<ArgMaxImplForward_cpu>::create({
-        op_.getInput(0)->dataType(),
-        op_.getOutput(0)->dataType()});
+    const auto impl = Registrar<ArgMaxImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    kernelFunc(op_.axis(),
+    impl.forward(op_.axis(),
                 op_.selectLastIndex(),
                 op_.getInput(0)->dims(),
                 op_.getInput(0)->getImpl()->rawPtr(),
                 op_.getOutput(0)->getImpl()->rawPtr());
 }
+
+template <>
+void Aidge::ArgMaxImpl_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for ArgMax_Op on backend cpu");
+}
diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp
index 798ca01b..01a5e8cf 100644
--- a/src/operator/AvgPoolingImpl.cpp
+++ b/src/operator/AvgPoolingImpl.cpp
@@ -16,24 +16,29 @@
 #include <vector>
 
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include "aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/AvgPoolingImpl_kernels.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/utils/Types.h"
 
+template <>
 void Aidge::AvgPoolingImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const AvgPooling_Op<2>&>(mOp);
     assert(op_.getInput(0) && "missing input #0");
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<AvgPoolingImpl2DForward_cpu>::create(
-        {op_.getInput(0)->dataType(),
-         op_.getOutput(0)->dataType()});
+    const auto impl = Registrar<AvgPoolingImpl2D_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    kernelFunc(op_.strideDims(),
+    impl.forward(op_.strideDims(),
                op_.kernelDims(),
                op_.getInput(0)->template dims<4>(),
                getCPUPtr(op_.getInput(0)),
                getCPUPtr(op_.getOutput(0)));
 }
+
+template <>
+void Aidge::AvgPoolingImpl2D_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for AvgPooling_Op<2> on backend cpu");
+}
+
diff --git a/src/operator/BatchNormImpl.cpp b/src/operator/BatchNormImpl.cpp
index 8291dded..9f1d986e 100644
--- a/src/operator/BatchNormImpl.cpp
+++ b/src/operator/BatchNormImpl.cpp
@@ -19,8 +19,9 @@
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/operator/BatchNorm.hpp"
 
-#include "aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/BatchNormImpl_kernels.hpp"
 
+template <>
 void Aidge::BatchNormImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const BatchNorm_Op<2>&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0 for BatchNorm Operator");
@@ -30,14 +31,12 @@ void Aidge::BatchNormImpl2D_cpu::forward() {
     AIDGE_ASSERT(op_.getInput(4), "missing input #4 for BatchNorm Operator");
 
     AIDGE_ASSERT(op_.getOutput(0)->nbDims() == 4, "");
+
     // Find the correct kernel type
-    auto kernelFunc =
-            Registrar<BatchNormImpl2DForward_cpu>::create({op_.getInput(0)->dataType(),
-                                                           op_.getInput(1)->dataType(),
-                                                           op_.getOutput(0)->dataType()});
+    const auto impl = Registrar<BatchNormImpl2D_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    kernelFunc(op_.epsilon(),
+    impl.forward(op_.epsilon(),
             op_.momentum(),
             op_.getInput(0)->template dims<4>(),
             getCPUPtr(op_.getRawInput(0)),
@@ -48,3 +47,8 @@ void Aidge::BatchNormImpl2D_cpu::forward() {
             getCPUPtr(op_.getRawOutput(0)),
             true);
 }
+
+template <>
+void Aidge::BatchNormImpl2D_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for BatchNorm_Op<2> on backend cpu");
+}
diff --git a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp
index ba3d4d8e..d86bba8d 100644
--- a/src/operator/ConvDepthWiseImpl.cpp
+++ b/src/operator/ConvDepthWiseImpl.cpp
@@ -15,12 +15,13 @@
 #include <vector>
 
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include "aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/ConvDepthWiseImpl_kernels.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/utils/Log.hpp"
 #include "aidge/utils/Types.h"
 
+template <>
 void Aidge::ConvDepthWiseImpl1D_cpu::forward() {
     const auto& op_ = dynamic_cast<const ConvDepthWise_Op<1>&>(mOp);
 
@@ -30,23 +31,7 @@ void Aidge::ConvDepthWiseImpl1D_cpu::forward() {
     AIDGE_ASSERT((op_.getInput(0)->nbDims() == 3), "support for 4-dimensions tensors only");
 
     // Find the correct kernel type
-    const auto outputDataType = op_.getOutput(0)->dataType();
-    const Registrar<ConvDepthWiseImpl1DForward_cpu>::registrar_key registrarKey = {
-        op_.getInput(0)->dataType(),
-        op_.getInput(1)->dataType(),
-        ((op_.getInput(2)) ? op_.getInput(2)->dataType() : op_.getInput(1)->dataType()),
-        outputDataType};
-
-    Registrar<ConvDepthWiseImpl1DForward_cpu>::registrar_type kernelFunc;
-    if (Registrar<ConvDepthWiseImpl1DForward_cpu>::exists(registrarKey)) {
-        // One exists with the right inputs/output types
-        kernelFunc = Registrar<ConvDepthWiseImpl1DForward_cpu>::create(registrarKey);
-    }
-    else {
-        // Otherwise, fallback to the kernel with all types matching output type
-        kernelFunc = Registrar<ConvDepthWiseImpl1DForward_cpu>::create({
-            outputDataType, outputDataType, outputDataType, outputDataType});
-    }
+    const auto impl = Registrar<ConvDepthWiseImpl1D_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Convert input data (no overhead if not needed!)
     // TODO: right now, if needed, memory will be allocated/deallocated at each
@@ -58,7 +43,7 @@ void Aidge::ConvDepthWiseImpl1D_cpu::forward() {
     const auto& input2 = (op_.getInput(2)) ? op_.getInput(2)->refCastFrom(input2Fallback, *op_.getOutput(0)) : Tensor();
 
     // Call kernel
-    kernelFunc(op_.strideDims(),
+    impl.forward(op_.strideDims(),
                 op_.dilationDims(),
                 op_.kernelDims(), // Conv attributes
                op_.getInput(0)->template dims<3>(), // input dimensions
@@ -69,6 +54,12 @@ void Aidge::ConvDepthWiseImpl1D_cpu::forward() {
             );
 }
 
+template <>
+void Aidge::ConvDepthWiseImpl1D_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for ConvDepthWise_Op<1> on backend cpu");
+}
+
+template <>
 void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const ConvDepthWise_Op<2>&>(mOp);
 
@@ -79,11 +70,7 @@ void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
     AIDGE_ASSERT((op_.getInput(0)->nbDims() == 4), "support for 4-dimensions tensors only");
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<ConvDepthWiseImpl2DForward_cpu>::create(
-        {op_.getInput(0)->dataType(),
-        op_.getInput(1)->dataType(),
-        op_.getInput(2)->dataType(),
-        op_.getOutput(0)->dataType()});
+    const auto impl = Registrar<ConvDepthWiseImpl2D_cpu>::create(getBestMatch(getRequiredSpec()));
 
         // Convert input data (no overhead if not needed!)
     // TODO: right now, if needed, memory will be allocated/deallocated at each
@@ -95,7 +82,7 @@ void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
     const auto& input2 = op_.getInput(2) ? op_.getInput(2)->refCastFrom(input2Fallback, *op_.getOutput(0)) : Tensor();
 
     // Call kernel
-    kernelFunc(op_.strideDims(),
+    impl.forward(op_.strideDims(),
             op_.dilationDims(),
             op_.kernelDims(),
             op_.getInput(0)->template dims<4>(),
@@ -104,3 +91,8 @@ void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
             op_.getInput(2) ?  input2.getImpl()->rawPtr() : nullptr,
             getCPUPtr(op_.getRawOutput(0)));
 }
+
+template <>
+void Aidge::ConvDepthWiseImpl2D_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for ConvDepthWise_Op<2> on backend cpu");
+}
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index 0f2a77a2..fdfe19fb 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -30,6 +30,7 @@ void Aidge::ConvImpl1D_cpu::forward() {
     AIDGE_ASSERT(op_.getInput(0), "missing input #0 in Conv Operator.");
     AIDGE_ASSERT(op_.getInput(1), "missing input #1 in Conv Operator.");
 
+    // Find the correct kernel type
     const auto impl = Registrar<ConvImpl1D_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Convert input data (no overhead if not needed!)
@@ -67,6 +68,7 @@ void Aidge::ConvImpl2D_cpu::forward() {
     AIDGE_ASSERT(op_.getInput(0), "missing input #0 in Conv Operator.");
     AIDGE_ASSERT(op_.getInput(1), "missing input #1 in Conv Operator.");
 
+    // Find the correct kernel type
     const auto impl = Registrar<ConvImpl2D_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Convert input data (no overhead if not needed!)
diff --git a/src/operator/DivImpl.cpp b/src/operator/DivImpl.cpp
index 3869b3a7..135b32b5 100644
--- a/src/operator/DivImpl.cpp
+++ b/src/operator/DivImpl.cpp
@@ -15,10 +15,11 @@
 #include "aidge/backend/cpu/data/Broadcasting.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 #include "aidge/backend/cpu/operator/DivImpl.hpp"
-#include "aidge/backend/cpu/operator/DivImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/DivImpl_kernels.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 
+template <>
 void Aidge::DivImpl_cpu::forward() {
     // Find the correct kernel type
     // auto kernelFunc = Registrar<DivImplForward_cpu>::create({
@@ -55,10 +56,7 @@ void Aidge::DivImpl_cpu::forward() {
     const auto& opTensor = static_cast<const Div_Op&>(mOp);
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<DivImplForward_cpu>::create({
-        opTensor.getInput(0)->dataType(),
-        opTensor.getInput(1)->dataType(),
-        opTensor.getOutput(0)->dataType()});
+    const auto impl = Registrar<DivImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Compute compatible input dimensions
     std::vector<std::size_t>        dims0   = opTensor.getInput(0)->dims();
@@ -68,7 +66,7 @@ void Aidge::DivImpl_cpu::forward() {
     // special case for equal dimensions, the kernel is called with the entire arrays at once
     if (dims0 == dims1) {
         const std::size_t input0_contiguous_size = std::accumulate(dims0.cbegin(), dims0.cend(), std::size_t(1), std::multiplies<std::size_t>());
-        kernelFunc(input0_contiguous_size, input0_contiguous_size, input0_contiguous_size,
+        impl.forward(input0_contiguous_size, input0_contiguous_size, input0_contiguous_size,
                     getCPUPtr(mOp.getRawInput(0)),
                     getCPUPtr(mOp.getRawInput(1)),
                     getCPUPtr(mOp.getRawOutput(0)));
@@ -134,7 +132,7 @@ void Aidge::DivImpl_cpu::forward() {
     std::size_t dim = contiguousIdx - 1;
     const std::size_t nbStacks = std::accumulate(outDims.cbegin(), outDims.cbegin() + contiguousIdx, std::size_t(1), std::multiplies<std::size_t>());
     for (std::size_t stack = 0; stack < nbStacks;) {
-        kernelFunc(input0_contiguous_size, input1_contiguous_size, output_contiguous_size,
+        impl.forward(input0_contiguous_size, input1_contiguous_size, output_contiguous_size,
                     getCPUPtr(mOp.getRawInput(0), offsetIn0*input0_contiguous_size),
                     getCPUPtr(mOp.getRawInput(1), offsetIn1*input1_contiguous_size),
                     getCPUPtr(mOp.getRawOutput(0), offsetOut*output_contiguous_size));
@@ -151,3 +149,8 @@ void Aidge::DivImpl_cpu::forward() {
         }
     }
 }
+
+template <>
+void Aidge::DivImpl_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for Div_Op on backend cpu");
+}
diff --git a/src/operator/ErfImpl.cpp b/src/operator/ErfImpl.cpp
index b32e19b6..42c6ce87 100644
--- a/src/operator/ErfImpl.cpp
+++ b/src/operator/ErfImpl.cpp
@@ -14,24 +14,27 @@
 #include <memory>
 #include <vector>
 
-#include "aidge/backend/cpu/operator/ErfImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/ErfImpl_kernels.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/Erf.hpp"
 #include "aidge/utils/Types.h"
 
+template <>
 void Aidge::ErfImpl_cpu::forward() {
     const Erf_Op& op = static_cast<const Erf_Op&>(mOp);
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<ErfImplForward_cpu>::create({
-                            op.getInput(0)->dataType(),
-                            op.getOutput(0)->dataType()
-                        });
+    const auto impl = Registrar<ErfImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    kernelFunc(
+    impl.forward(
         op.getInput(0)->size(),
         op.getInput(0)->getImpl()->rawPtr(),
         op.getOutput(0)->getImpl()->rawPtr()
     );
 }
+
+template <>
+void Aidge::ErfImpl_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for Erf_Op on backend cpu");
+}
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index f7eebb7b..35945271 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -17,37 +17,20 @@
 #include <tuple>
 
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include "aidge/backend/cpu/operator/FCImpl_backward_kernels.hpp"
-#include "aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/FCImpl_kernels.hpp"
 #include "aidge/operator/FC.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 #include "aidge/utils/Types.h"
 
 
+template <>
 void Aidge::FCImpl_cpu::forward()
 {
     const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0");
     AIDGE_ASSERT(op_.getInput(1), "missing input #1");
 
-    // Find the correct kernel type
-    const auto outputDataType = op_.getOutput(0)->dataType();
-    const Registrar<FCImplForward_cpu>::registrar_key registrarKey = {
-        op_.getInput(0)->dataType(),
-        op_.getInput(1)->dataType(),
-        ((op_.getInput(2)) ? op_.getInput(2)->dataType() : op_.getInput(1)->dataType()),
-        outputDataType};
-
-    Registrar<FCImplForward_cpu>::registrar_type kernelFunc;
-    if (Registrar<FCImplForward_cpu>::exists(registrarKey)) {
-        // One exists with the right inputs/output types
-        kernelFunc = Registrar<FCImplForward_cpu>::create(registrarKey);
-    }
-    else {
-        // Otherwise, fallback to the kernel with all types matching output type
-        kernelFunc = Registrar<FCImplForward_cpu>::create({
-            outputDataType, outputDataType, outputDataType, outputDataType});
-    }
+    const auto impl = Registrar<FCImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Convert input data (no overhead if not needed!)
     // TODO: right now, if needed, memory will be allocated/deallocated at each
@@ -60,7 +43,7 @@ void Aidge::FCImpl_cpu::forward()
 
     // Call kernel
     const auto batchSize = (input0.dims().size() > 1) ? input0.dims()[0] : 1;
-    kernelFunc(batchSize,
+    impl.forward(batchSize,
         input1.dims()[1], // nb input features
         input1.dims()[0], // nb output features
         input0.getImpl()->rawPtr(),
@@ -69,6 +52,7 @@ void Aidge::FCImpl_cpu::forward()
         getCPUPtr(mOp.getRawOutput(0)));
 }
 
+template <>
 void Aidge::FCImpl_cpu::backward()
 {
     const FC_Op& op_ = dynamic_cast<const FC_Op&>(mOp);
@@ -77,23 +61,7 @@ void Aidge::FCImpl_cpu::backward()
     AIDGE_ASSERT(op_.getInput(0)->grad(), "missing input #0 gradient");
     AIDGE_ASSERT(op_.getInput(1)->grad(), "missing input #1 gradient");
 
-    // Find the correct kernel type
-    const Registrar<FCImplBackward_cpu>::registrar_key registrarKey = {
-        fc_grad->dataType(),
-        op_.getInput(1)->grad()->dataType(),
-        (op_.getInput(2)) ? op_.getInput(2)->grad()->dataType() : op_.getInput(1)->grad()->dataType(),
-        op_.getInput(0)->grad()->dataType()};
-
-    Registrar<FCImplBackward_cpu>::registrar_type kernelFunc;
-    if (Registrar<FCImplBackward_cpu>::exists(registrarKey)) {
-        // One exists with the right inputs/output types
-        kernelFunc = Registrar<FCImplBackward_cpu>::create(registrarKey);
-    }
-    else {
-        // Otherwise, fallback to the kernel with all types matching output type
-        kernelFunc = Registrar<FCImplBackward_cpu>::create({
-            fc_grad->dataType(), fc_grad->dataType(), fc_grad->dataType(), fc_grad->dataType()});
-    }
+    const auto impl = Registrar<FCImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Convert input data (no overhead if not needed!)
     // TODO: right now, if needed, memory will be allocated/deallocated at each
@@ -106,7 +74,7 @@ void Aidge::FCImpl_cpu::backward()
 
     // Call kernel
     const auto batchSize = (input0grad.dims().size() > 1) ? input0grad.dims()[0] : 1;
-    kernelFunc(batchSize,
+    impl.backward(batchSize,
         input1grad.dims()[1], // nb input features
         input1grad.dims()[0], // nb output features
         getCPUPtr(fc_grad),
diff --git a/src/operator/FoldImpl.cpp b/src/operator/FoldImpl.cpp
index 532ba946..10f3d7b5 100644
--- a/src/operator/FoldImpl.cpp
+++ b/src/operator/FoldImpl.cpp
@@ -20,18 +20,18 @@
 #include "aidge/operator/Conv.hpp"
 
 #include "aidge/backend/cpu/operator/FoldImpl.hpp"
-#include "aidge/backend/cpu/operator/FoldImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/FoldImpl_kernels.hpp"
 
+template <>
 void Aidge::FoldImpl2D_cpu::forward() {
+    const auto& op_ = static_cast<const Fold_Op<2>&>(mOp);
     assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
 
     // Find the correct kernel type
-    auto kernelFunc =
-            Registrar<FoldImpl2DForward_cpu>::create({std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(), std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+    const auto impl = Registrar<FoldImpl2D_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    const auto& op_ = static_cast<const Fold_Op<2>&>(mOp);
-    kernelFunc(op_.outputDims(),
+    impl.forward(op_.outputDims(),
                 op_.strideDims(),
                 op_.dilationDims(),
                 op_.kernelDims(),
@@ -39,3 +39,8 @@ void Aidge::FoldImpl2D_cpu::forward() {
                 getCPUPtr(mOp.getRawInput(0)),
                 getCPUPtr(mOp.getRawOutput(0)));
 }
+
+template <>
+void Aidge::FoldImpl2D_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for Fold_Op<2> on backend cpu");
+}
diff --git a/src/operator/GlobalAveragePoolingImpl.cpp b/src/operator/GlobalAveragePoolingImpl.cpp
index f7280360..c53f92e1 100644
--- a/src/operator/GlobalAveragePoolingImpl.cpp
+++ b/src/operator/GlobalAveragePoolingImpl.cpp
@@ -15,7 +15,7 @@
 #include <memory>
 #include <vector>
 
-#include "aidge/backend/cpu/operator/GlobalAveragePoolingImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/GlobalAveragePoolingImpl_kernels.hpp"
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/GlobalAveragePooling.hpp"
@@ -24,18 +24,23 @@
 #include "aidge/utils/Types.h"
 
 
+template <>
 void Aidge::GlobalAveragePoolingImpl_cpu::forward()
 {
     const GlobalAveragePooling_Op& op_ = static_cast<const GlobalAveragePooling_Op&>(mOp);
     // Check if input is provided
     AIDGE_ASSERT(op_.getInput(0), "missing input 0");
 
-    // Create the forward kernal with the wanted types
-    auto kernelFunc = Registrar<GlobalAveragePoolingImplForward_cpu>::create({op_.getInput(0)->dataType(),
-                                                                              op_.getOutput(0)->dataType()});
+    // Find the correct kernel type
+    const auto impl = Registrar<GlobalAveragePoolingImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    kernelFunc(op_.getInput(0)->dims(),
+    impl.forward(op_.getInput(0)->dims(),
                op_.getInput(0)->getImpl()->rawPtr(),
                op_.getOutput(0)->getImpl()->rawPtr());
-}
\ No newline at end of file
+}
+
+template <>
+void Aidge::GlobalAveragePoolingImpl_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for GlobalAveragePooling_Op on backend cpu");
+}
diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp
index e7088742..6c0802dd 100644
--- a/src/operator/LeakyReLUImpl.cpp
+++ b/src/operator/LeakyReLUImpl.cpp
@@ -14,14 +14,14 @@
 #include <vector>
 
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include "aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp"
-#include "aidge/backend/cpu/operator/LeakyReLUImpl_backward_kernels.hpp"
+#include "aidge/backend/cpu/operator/LeakyReLUImpl_kernels.hpp"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/operator/LeakyReLU.hpp"
 #include "aidge/utils/Log.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/Registrar.hpp"
 
+template <>
 void Aidge::LeakyReLUImpl_cpu::forward() {
     const LeakyReLU_Op& op_ = dynamic_cast<const LeakyReLU_Op&>(mOp);
 
@@ -30,17 +30,16 @@ void Aidge::LeakyReLUImpl_cpu::forward() {
     AIDGE_ASSERT(in0, "missing input #0");
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<LeakyReLUImplForward_cpu>::create({
-        in0->dataType(),
-        out0->dataType()});
+    const auto impl = Registrar<LeakyReLUImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    kernelFunc(op_.negativeSlope(),
+    impl.forward(op_.negativeSlope(),
         in0->size(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawOutput(0)));
 }
 
+template <>
 void Aidge::LeakyReLUImpl_cpu::backward() {
     // reversing in and out Data for backprop
     const LeakyReLU_Op& op_ = dynamic_cast<const LeakyReLU_Op&>(mOp);
@@ -49,12 +48,10 @@ void Aidge::LeakyReLUImpl_cpu::backward() {
     AIDGE_ASSERT(in0, "missing input #0");
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<LeakyReLUImplForward_cpu>::create({
-        in0->dataType(),
-        out0->dataType()});
+    const auto impl = Registrar<LeakyReLUImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    kernelFunc(op_.negativeSlope(),
+    impl.backward(op_.negativeSlope(),
         in0->size(),
         getCPUPtr(in0),
         getCPUPtr(out0));
diff --git a/src/operator/LnImpl.cpp b/src/operator/LnImpl.cpp
index ec1d5cf3..79df7339 100644
--- a/src/operator/LnImpl.cpp
+++ b/src/operator/LnImpl.cpp
@@ -20,9 +20,9 @@
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/LnImpl.hpp"
-#include "aidge/backend/cpu/operator/LnImpl_forward_kernels.hpp"
-#include "aidge/backend/cpu/operator/LnImpl_backward_kernels.hpp"
+#include "aidge/backend/cpu/operator/LnImpl_kernels.hpp"
 
+template <>
 void Aidge::LnImpl_cpu::forward() {
     const Ln_Op& op_ = static_cast<const Ln_Op&>(mOp);
 	std::shared_ptr<Tensor> in0 = op_.getInput(0);
@@ -30,16 +30,15 @@ void Aidge::LnImpl_cpu::forward() {
     AIDGE_ASSERT(in0, "missing input #0");
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<LnImplForward_cpu>::create({
-        in0->dataType(),
-	    out0->dataType()});
+    const auto impl = Registrar<LnImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    kernelFunc(in0->size(),
+    impl.forward(in0->size(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawOutput(0)));
 }
 
+template <>
 void Aidge::LnImpl_cpu::backward() {
     const Ln_Op& op_ = dynamic_cast<const Ln_Op&>(mOp);
 	std::shared_ptr<Tensor> in0  = op_.getInput(0);
@@ -49,12 +48,8 @@ void Aidge::LnImpl_cpu::backward() {
     AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type());
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<LnImplBackward_cpu>::create({
-        in0->dataType(),
-	    gra_int0->dataType(),
-        gra_out0->dataType()        
-    });
+    const auto impl = Registrar<LnImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    kernelFunc(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
+    impl.backward(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
 }
diff --git a/src/operator/MatMulImpl.cpp b/src/operator/MatMulImpl.cpp
index e7167268..ccd3265e 100644
--- a/src/operator/MatMulImpl.cpp
+++ b/src/operator/MatMulImpl.cpp
@@ -19,17 +19,16 @@
 #include "aidge/utils/Types.h"
 
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
-#include "aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/MatMulImpl_kernels.hpp"
 
+template <>
 void Aidge::MatMulImpl_cpu::forward()
 {
     assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(0)) && "missing input #0");
     assert(std::static_pointer_cast<Tensor>(mOp.getRawInput(1)) && "missing input #1");
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<MatMulImplForward_cpu>::create(
-        {std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
+    const auto impl = Registrar<MatMulImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Compute compatible input dimensions
     std::vector<std::size_t> dims0 = static_cast<const MatMul_Op&>(mOp).getInput(0)->dims();
@@ -91,7 +90,7 @@ void Aidge::MatMulImpl_cpu::forward()
     const std::size_t matrix1Size = k*m;
     const std::size_t matrixOutSize = n*m;
     for (std::size_t stack = 0; stack < nbMatrices;) {
-        kernelFunc(n, k, m,
+        impl.forward(n, k, m,
                     getCPUPtr(mOp.getRawInput(0), offsetIn0*matrix0Size),
                     getCPUPtr(mOp.getRawInput(1), offsetIn1*matrix1Size),
                     getCPUPtr(mOp.getRawOutput(0), offsetOut*matrixOutSize));
@@ -126,3 +125,8 @@ void Aidge::MatMulImpl_cpu::forward()
 //         getCPUPtr(mOp.getRawInput(1)),
 //         getCPUPtr(mOp.getRawOutput(0)));
 // }
+
+template <>
+void Aidge::MatMulImpl_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for MatMul_Op on backend cpu");
+}
diff --git a/src/operator/MaxPoolingImpl.cpp b/src/operator/MaxPoolingImpl.cpp
index ec21feb9..90075a39 100644
--- a/src/operator/MaxPoolingImpl.cpp
+++ b/src/operator/MaxPoolingImpl.cpp
@@ -14,26 +14,29 @@
 #include <vector>
 
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
-#include "aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/MaxPoolingImpl_kernels.hpp"
 #include "aidge/operator/MaxPooling.hpp"
 #include "aidge/utils/Log.hpp"
 #include "aidge/utils/Types.h"
 
+template <>
 void Aidge::MaxPoolingImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const MaxPooling_Op<2>&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0 in MaxPooling Operator.");
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<MaxPoolingImpl2DForward_cpu>::create({
-        op_.getInput(0)->dataType(),
-        op_.getOutput(0)->dataType()
-    });
+    const auto impl = Registrar<MaxPoolingImpl2D_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    kernelFunc(op_.strideDims(),
+    impl.forward(op_.strideDims(),
                 op_.kernelDims(),
                 op_.ceilMode(),
                 op_.getInput(0)->template dims<4>(),
                 getCPUPtr(mOp.getRawInput(0)),
                 getCPUPtr(mOp.getRawOutput(0)));
 }
+
+template <>
+void Aidge::MaxPoolingImpl2D_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for MaxPooling_Op<2> on backend cpu");
+}
diff --git a/src/operator/MulImpl.cpp b/src/operator/MulImpl.cpp
index c6a820e2..541e9313 100644
--- a/src/operator/MulImpl.cpp
+++ b/src/operator/MulImpl.cpp
@@ -21,25 +21,28 @@
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/backend/cpu/operator/MulImpl.hpp"
-#include "aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp"
+#include "aidge/backend/cpu/operator/MulImpl_kernels.hpp"
 
+template <>
 void Aidge::MulImpl_cpu::forward() {
-    // Find the correct kernel type
-    auto kernelFunc = Registrar<MulImplForward_cpu>::create({
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dataType(),
-        std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
-
     const std::vector<std::size_t> inputDims0 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
                                                                    std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims());
     const std::vector<std::size_t> inputDims1 = getBroadcastedDims(std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
                                                                    std::static_pointer_cast<Tensor>(mOp.getRawInput(1))->dims());
 
+    // Find the correct kernel type
+    const auto impl = Registrar<MulImpl_cpu>::create(getBestMatch(getRequiredSpec()));
+
     // Call kernel
-    kernelFunc(inputDims0,
+    impl.forward(inputDims0,
         inputDims1,
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dims(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawInput(1)),
         getCPUPtr(mOp.getRawOutput(0)));
 }
+
+template <>
+void Aidge::MulImpl_cpu::backward() {
+    AIDGE_THROW_OR_ABORT(std::runtime_error, "Backward not yet implemented for Mul_Op on backend cpu");
+}
-- 
GitLab