From dcbeb57fd8c2d4aca630b97aa7c87db915145d45 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Mon, 26 Aug 2024 11:02:04 +0200
Subject: [PATCH] First working concept: check ReLU operator

---
 .../aidge/backend/cpu/operator/AddImpl.hpp    |  6 +-
 .../backend/cpu/operator/AvgPoolingImpl.hpp   | 10 +--
 .../backend/cpu/operator/BatchNormImpl.hpp    | 10 +--
 .../cpu/operator/ConvDepthWiseImpl.hpp        | 16 ++---
 .../aidge/backend/cpu/operator/ConvImpl.hpp   | 16 ++---
 .../aidge/backend/cpu/operator/DivImpl.hpp    |  8 +--
 .../aidge/backend/cpu/operator/ErfImpl.hpp    |  6 +-
 include/aidge/backend/cpu/operator/FCImpl.hpp |  8 +--
 .../aidge/backend/cpu/operator/FoldImpl.hpp   |  4 +-
 .../cpu/operator/GlobalAveragePoolingImpl.hpp |  4 +-
 .../backend/cpu/operator/LeakyReLUImpl.hpp    | 10 +--
 include/aidge/backend/cpu/operator/LnImpl.hpp |  6 +-
 .../aidge/backend/cpu/operator/MatMulImpl.hpp |  8 +--
 .../backend/cpu/operator/MaxPoolingImpl.hpp   | 10 +--
 .../aidge/backend/cpu/operator/MulImpl.hpp    |  6 +-
 .../backend/cpu/operator/OperatorImpl.hpp     | 50 ++++++++++++++
 .../aidge/backend/cpu/operator/PadImpl.hpp    | 22 +++---
 .../aidge/backend/cpu/operator/PowImpl.hpp    |  6 +-
 .../aidge/backend/cpu/operator/ReLUImpl.hpp   | 68 +++++++++++++------
 .../operator/ReLUImpl_backward_kernels.hpp    | 46 -------------
 .../cpu/operator/ReLUImpl_forward_kernels.hpp | 44 ------------
 .../backend/cpu/operator/ReduceMeanImpl.hpp   | 20 +++---
 .../backend/cpu/operator/ScalingImpl.hpp      | 10 +--
 .../backend/cpu/operator/SigmoidImpl.hpp      |  6 +-
 .../aidge/backend/cpu/operator/SliceImpl.hpp  | 10 +--
 .../backend/cpu/operator/SoftmaxImpl.hpp      |  6 +-
 .../aidge/backend/cpu/operator/SqrtImpl.hpp   |  6 +-
 .../aidge/backend/cpu/operator/SubImpl.hpp    |  6 +-
 .../aidge/backend/cpu/operator/TanhImpl.hpp   |  6 +-
 src/operator/AddImpl.cpp                      |  5 --
 src/operator/AvgPoolingImpl.cpp               |  5 --
 src/operator/BatchNormImpl.cpp                |  5 --
 src/operator/ConvDepthWiseImpl.cpp            | 11 ---
 src/operator/ConvImpl.cpp                     | 10 ---
 src/operator/DivImpl.cpp                      |  5 --
 src/operator/ErfImpl.cpp                      |  5 --
 src/operator/LeakyReLUImpl.cpp                |  6 --
 src/operator/LnImpl.cpp                       |  5 --
 src/operator/MaxPoolingImpl.cpp               |  6 --
 src/operator/MulImpl.cpp                      |  5 --
 src/operator/PadImpl.cpp                      | 14 +---
 src/operator/PowImpl.cpp                      |  5 --
 src/operator/ReLUImpl.cpp                     | 23 ++-----
 src/operator/ScalingImpl.cpp                  |  5 --
 src/operator/SigmoidImpl.cpp                  |  5 --
 src/operator/SliceImpl.cpp                    |  5 --
 src/operator/SoftmaxImpl.cpp                  |  5 --
 src/operator/SqrtImpl.cpp                     |  5 --
 src/operator/SubImpl.cpp                      |  5 --
 src/operator/TanhImpl.cpp                     |  5 --
 50 files changed, 219 insertions(+), 360 deletions(-)
 create mode 100644 include/aidge/backend/cpu/operator/OperatorImpl.hpp
 delete mode 100644 include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp
 delete mode 100644 include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp

diff --git a/include/aidge/backend/cpu/operator/AddImpl.hpp b/include/aidge/backend/cpu/operator/AddImpl.hpp
index 7a1497a2..f71e3fcf 100644
--- a/include/aidge/backend/cpu/operator/AddImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl.hpp
@@ -26,10 +26,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class AddImplForward_cpu
-    : public Registrable<AddImplForward_cpu, std::tuple<DataType, DataType>, void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)> {};
+    : public Registrable<AddImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)>> {};
 
 class AddImplBackward_cpu
-    : public Registrable<AddImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)> {};
+    : public Registrable<AddImplBackward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)>> {};
 
 
 class AddImpl_cpu : public OperatorImpl {
@@ -40,7 +40,7 @@ public:
         return std::make_unique<AddImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
index 12a5dc33..7fbd7cd9 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
@@ -30,19 +30,19 @@ namespace Aidge {
 class AvgPoolingImpl2DForward_cpu
     : public Registrable<AvgPoolingImpl2DForward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::array<DimSize_t, 2>&,
+                        std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 4>&,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 class AvgPoolingImpl2DBackward_cpu
     : public Registrable<AvgPoolingImpl2DBackward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::array<DimSize_t, 2>&,
+                        std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 4>&,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class AvgPoolingImpl2D_cpu : public OperatorImpl {
 public:
@@ -52,7 +52,7 @@ public:
         return std::make_unique<AvgPoolingImpl2D_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
index 93bdab2d..45860e33 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
@@ -30,7 +30,7 @@ namespace Aidge {
 class BatchNormImpl2DForward_cpu
     : public Registrable<BatchNormImpl2DForward_cpu,
                          std::tuple<DataType, DataType, DataType>,
-                         void(float,
+                         std::function<void(float,
                             float,
                             const std::array<DimSize_t, 4> &,
                             const void *,
@@ -39,11 +39,11 @@ class BatchNormImpl2DForward_cpu
                             void *,
                             void *,
                             void *,
-                            const bool)> {};
+                            const bool)>> {};
 class BatchNormImpl2DBackward_cpu
     : public Registrable<BatchNormImpl2DBackward_cpu,
                          std::tuple<DataType, DataType, DataType>,
-                         void(float,
+                         std::function<void(float,
                             float,
                             const std::array<DimSize_t, 4> &,
                             const void *,
@@ -51,7 +51,7 @@ class BatchNormImpl2DBackward_cpu
                             const void *,
                             void *,
                             void *,
-                            void *)> {};
+                            void *)>> {};
 
 class BatchNormImpl2D_cpu : public OperatorImpl {
 public:
@@ -61,7 +61,7 @@ public:
         return std::make_unique<BatchNormImpl2D_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
index ec886a31..5e59d502 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
@@ -29,14 +29,14 @@ namespace Aidge {
 class ConvDepthWiseImpl1DForward_cpu
     : public Registrable<ConvDepthWiseImpl1DForward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const std::array<DimSize_t, 1>&,
+                         std::function<void(const std::array<DimSize_t, 1>&,
                             const std::array<DimSize_t, 1>&,
                             const std::array<DimSize_t, 1>&,
                             const std::array<DimSize_t, 3>&,
                             const void *,
                             const void *,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class ConvDepthWiseImpl1D_cpu : public OperatorImpl {
 public:
@@ -46,7 +46,7 @@ public:
         return std::make_unique<ConvDepthWiseImpl1D_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
@@ -59,18 +59,18 @@ static Registrar<ConvDepthWise_Op<1>> registrarConvDepthWiseImpl1D_cpu("cpu", Ai
 class ConvDepthWiseImpl2DForward_cpu
     : public Registrable<ConvDepthWiseImpl2DForward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const std::array<DimSize_t, 2>&,
+                         std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 4> &,
                             const void *,
                             const void *,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 class ConvDepthWiseImpl2DBackward_cpu
     : public Registrable<ConvDepthWiseImpl2DBackward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const std::array<DimSize_t, 2>&,
+                         std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             bool,
@@ -78,7 +78,7 @@ class ConvDepthWiseImpl2DBackward_cpu
                             const void *,
                             const void *,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
 public:
@@ -88,7 +88,7 @@ public:
         return std::make_unique<ConvDepthWiseImpl2D_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp
index d7be46c2..5cd1c804 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp
@@ -31,7 +31,7 @@ namespace Aidge {
 class ConvImpl1DForward_cpu
     : public Registrable<ConvImpl1DForward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const std::array<DimSize_t, 1>&,
+                         std::function<void(const std::array<DimSize_t, 1>&,
                             const std::array<DimSize_t, 1>&,
                             const std::array<DimSize_t, 1>&,
                             const std::array<DimSize_t, 3> &,
@@ -39,7 +39,7 @@ class ConvImpl1DForward_cpu
                             const void *,
                             const void *,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class ConvImpl1D_cpu : public OperatorImpl {
    public:
@@ -50,7 +50,7 @@ class ConvImpl1D_cpu : public OperatorImpl {
     }
 
    public:
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
@@ -63,7 +63,7 @@ static Registrar<Conv_Op<1>> registrarConvImpl1D_cpu("cpu", Aidge::ConvImpl1D_cp
 class ConvImpl2DForward_cpu
     : public Registrable<ConvImpl2DForward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const std::array<DimSize_t, 2>&,
+                         std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 4> &,
@@ -71,11 +71,11 @@ class ConvImpl2DForward_cpu
                             const void *,
                             const void *,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 class ConvImpl2DBackward_cpu
     : public Registrable<ConvImpl2DBackward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const std::array<DimSize_t, 2>&,
+                         std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             bool,
@@ -83,7 +83,7 @@ class ConvImpl2DBackward_cpu
                             const void *,
                             const void *,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class ConvImpl2D_cpu : public OperatorImpl {
    public:
@@ -94,7 +94,7 @@ class ConvImpl2D_cpu : public OperatorImpl {
     }
 
    public:
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/DivImpl.hpp b/include/aidge/backend/cpu/operator/DivImpl.hpp
index 3a19d730..c969e19e 100644
--- a/include/aidge/backend/cpu/operator/DivImpl.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl.hpp
@@ -25,11 +25,11 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class DivImplForward_cpu
-    // : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> {
-    : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const std::size_t, const void*, const void*,void*)> {
+    // : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)>> {
+    : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::size_t, const std::size_t, const std::size_t, const void*, const void*,void*)>> {
 };
 class DivImplBackward_cpu
-    : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
+    : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)>> {
 };
 
 class DivImpl_cpu : public OperatorImpl {
@@ -40,7 +40,7 @@ public:
         return std::make_unique<DivImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
 
     void forward() override final;
 };
diff --git a/include/aidge/backend/cpu/operator/ErfImpl.hpp b/include/aidge/backend/cpu/operator/ErfImpl.hpp
index 6864803a..11aed23d 100644
--- a/include/aidge/backend/cpu/operator/ErfImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ErfImpl.hpp
@@ -24,10 +24,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class ErfImplForward_cpu
-    : public Registrable<ErfImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<ErfImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
 };
 class ErfImplBackward_cpu
-    : public Registrable<ErfImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<ErfImplBackward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
 };
 
 class ErfImpl_cpu : public OperatorImpl {
@@ -38,7 +38,7 @@ public:
         return std::make_unique<ErfImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/FCImpl.hpp b/include/aidge/backend/cpu/operator/FCImpl.hpp
index f21cd0ff..fefd88b4 100644
--- a/include/aidge/backend/cpu/operator/FCImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl.hpp
@@ -30,19 +30,19 @@ class FCImplForward_cpu : public Registrable<FCImplForward_cpu,
                                                         DataType,
                                                         DataType,
                                                         DataType>,
-                                             void(const DimSize_t,
+                                             std::function<void(const DimSize_t,
                                                 const DimSize_t,
                                                 const DimSize_t,
                                                 const void *,
                                                 const void *,
                                                 const void *,
-                                                void *)> {};
+                                                void *)>> {};
 class FCImplBackward_cpu : public Registrable<FCImplBackward_cpu,
                                               std::tuple<DataType,
                                                          DataType,
                                                          DataType,
                                                          DataType>,
-                                              void(const DimSize_t,
+                                              std::function<void(const DimSize_t,
                                                 const DimSize_t,
                                                 const DimSize_t,
                                                 const void *,
@@ -50,7 +50,7 @@ class FCImplBackward_cpu : public Registrable<FCImplBackward_cpu,
                                                 const void *,
                                                 void *,
                                                 void *,
-                                                void *)> {};
+                                                void *)>> {};
 
 class FCImpl_cpu : public OperatorImpl {
 public:
diff --git a/include/aidge/backend/cpu/operator/FoldImpl.hpp b/include/aidge/backend/cpu/operator/FoldImpl.hpp
index 61701138..a5b33225 100644
--- a/include/aidge/backend/cpu/operator/FoldImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FoldImpl.hpp
@@ -27,13 +27,13 @@ namespace Aidge {
 class FoldImpl2DForward_cpu
     : public Registrable<FoldImpl2DForward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const std::array<DimSize_t, 2>&,
+                         std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::vector<DimSize_t> &,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class FoldImpl2D_cpu : public OperatorImpl {
 public:
diff --git a/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp b/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp
index 758535de..6ce10711 100644
--- a/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp
@@ -26,12 +26,12 @@ namespace Aidge {
 class GlobalAveragePoolingImplForward_cpu
     : public Registrable<
           GlobalAveragePoolingImplForward_cpu, std::tuple<DataType, DataType>,
-          void(const std::vector<DimSize_t> &, const void *, void *)> {};
+          std::function<void(const std::vector<DimSize_t> &, const void *, void *)>> {};
 
 class GlobalAveragePoolingImplBackward_cpu
     : public Registrable<
           GlobalAveragePoolingImplBackward_cpu, std::tuple<DataType, DataType>,
-          void(const std::vector<DimSize_t> &, const void *, void *)> {};
+          std::function<void(const std::vector<DimSize_t> &, const void *, void *)>> {};
 
 class GlobalAveragePoolingImpl_cpu : public OperatorImpl {
 public:
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
index c9ad909e..a3e95540 100644
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
@@ -27,17 +27,17 @@ namespace Aidge {
 class LeakyReLUImplForward_cpu
     : public Registrable<LeakyReLUImplForward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const float,
+                        std::function<void(const float,
                             std::size_t,
                             const void*,
-                            void*)> {};
+                            void*)>> {};
 class LeakyReLUImplBackward_cpu
     : public Registrable<LeakyReLUImplBackward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const float,
+                        std::function<void(const float,
                             std::size_t,
                             const void*,
-                            void*)> {};
+                            void*)>> {};
 
 class LeakyReLUImpl_cpu : public OperatorImpl {
 public:
@@ -47,7 +47,7 @@ public:
         return std::make_unique<LeakyReLUImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
 
     void forward() override final;
 
diff --git a/include/aidge/backend/cpu/operator/LnImpl.hpp b/include/aidge/backend/cpu/operator/LnImpl.hpp
index faa03855..0c7b9709 100755
--- a/include/aidge/backend/cpu/operator/LnImpl.hpp
+++ b/include/aidge/backend/cpu/operator/LnImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class LnImplForward_cpu
-    : public Registrable<LnImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<LnImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
 };
 class LnImplBackward_cpu
-    : public Registrable<LnImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<LnImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::size_t, const void*, const void*, void*)>> {
 };
 
 class LnImpl_cpu : public OperatorImpl {
@@ -39,7 +39,7 @@ public:
         return std::make_unique<LnImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
 	
     void forward() override final;
 
diff --git a/include/aidge/backend/cpu/operator/MatMulImpl.hpp b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
index e4b76d64..957002de 100644
--- a/include/aidge/backend/cpu/operator/MatMulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
@@ -26,12 +26,12 @@ namespace Aidge {
 
 class MatMulImplForward_cpu
     : public Registrable<MatMulImplForward_cpu, std::tuple<DataType, DataType>,
-                         void(const std::size_t, const std::size_t, const std::size_t,
-                              const void *, const void *, void *)> {};
+                         std::function<void(const std::size_t, const std::size_t, const std::size_t,
+                              const void *, const void *, void *)>> {};
 class MatMulImplBackward_cpu
     : public Registrable<MatMulImplBackward_cpu, std::tuple<DataType, DataType>,
-                         void(const std::vector<DimSize_t>&, const std::vector<DimSize_t>&,
-                              const void *, const void *, void *)> {};
+                         std::function<void(const std::vector<DimSize_t>&, const std::vector<DimSize_t>&,
+                              const void *, const void *, void *)>> {};
 
 class MatMulImpl_cpu : public OperatorImpl {
 public:
diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
index 4dd30e1f..c561da6a 100644
--- a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
@@ -30,21 +30,21 @@ namespace Aidge {
 class MaxPoolingImpl2DForward_cpu
     : public Registrable<MaxPoolingImpl2DForward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::array<DimSize_t, 2>&,
+                        std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const bool,
                             const std::array<DimSize_t, 4> &,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 class MaxPoolingImpl2DBackward_cpu
     : public Registrable<MaxPoolingImpl2DBackward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::array<DimSize_t, 2>&,
+                        std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const bool,
                             const std::array<DimSize_t, 4> &,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class MaxPoolingImpl2D_cpu : public OperatorImpl {
 public:
@@ -54,7 +54,7 @@ public:
         return std::make_unique<MaxPoolingImpl2D_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/MulImpl.hpp b/include/aidge/backend/cpu/operator/MulImpl.hpp
index 2d42194c..6c4cd64c 100644
--- a/include/aidge/backend/cpu/operator/MulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MulImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class MulImplForward_cpu
-    : public Registrable<MulImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> {
+    : public Registrable<MulImplForward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)>> {
 };
 class MulImplBackward_cpu
-    : public Registrable<MulImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
+    : public Registrable<MulImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)>> {
 };
 
 class MulImpl_cpu : public OperatorImpl {
@@ -39,7 +39,7 @@ public:
         return std::make_unique<MulImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/OperatorImpl.hpp b/include/aidge/backend/cpu/operator/OperatorImpl.hpp
new file mode 100644
index 00000000..b11f952e
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/OperatorImpl.hpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_IMPL_H_
+#define AIDGE_CPU_OPERATOR_IMPL_H_
+
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <tuple>    // std::tuple
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+template <class Op, class FwdFunc, class BwdFunc>
+class OperatorImpl_cpu : public OperatorImpl,
+    public Registrable<OperatorImpl_cpu<Op, FwdFunc, BwdFunc>, ImplSpec, Impl<FwdFunc, BwdFunc>>
+{
+public:
+    OperatorImpl_cpu(const Op& op) : OperatorImpl(op, "cpu") {}
+
+    static std::unique_ptr<OperatorImpl_cpu<Op, FwdFunc, BwdFunc>> create(const Op& op) {
+        return std::make_unique<OperatorImpl_cpu<Op, FwdFunc, BwdFunc>>(op);
+    }
+
+    virtual std::shared_ptr<ProdConso> getProdConso() const override {
+        const auto impl = Registrar<OperatorImpl_cpu>::create(getBestMatch(getRequiredSpec()));
+        return impl.prodConso(mOp);
+    }
+
+    virtual std::vector<ImplSpec> getAvailableImplSpecs() const override {
+        return Registrar<OperatorImpl_cpu>::getKeys();
+    }
+
+    void forward() override;
+    void backward() override;
+};
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_IMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/PadImpl.hpp b/include/aidge/backend/cpu/operator/PadImpl.hpp
index c6e41c29..a49c5ee8 100644
--- a/include/aidge/backend/cpu/operator/PadImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PadImpl.hpp
@@ -24,17 +24,23 @@
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
+class Pad_ProdConso_cpu : public ProdConso {
+public:
+    Pad_ProdConso_cpu(const Operator& op): ProdConso(op) {}
+    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+};
+
 // class Pad_Op;
 // compute kernel registry for forward and backward
 class PadImpl1DForward_cpu
     : public Registrable<PadImpl1DForward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const std::array<DimSize_t, 2>&,
+                         std::function<void(const std::array<DimSize_t, 2>&,
                             const PadBorderType,
                             const double,
                             const std::array<DimSize_t, 3> &,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class PadImpl1D_cpu : public OperatorImpl {
 public:
@@ -44,7 +50,7 @@ public:
         return std::make_unique<PadImpl1D_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<Pad_ProdConso_cpu>(mOp); };
     void forward() override;
 };
 
@@ -58,21 +64,21 @@ static Registrar<Pad_Op<1>> registrarPadImpl1D_cpu("cpu", Aidge::PadImpl1D_cpu::
 class PadImpl2DForward_cpu
     : public Registrable<PadImpl2DForward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const std::array<DimSize_t, 4>&,
+                         std::function<void(const std::array<DimSize_t, 4>&,
                             const PadBorderType,
                             const double,
                             const std::array<DimSize_t, 4> &,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 class PadImpl2DBackward_cpu
     : public Registrable<PadImpl2DBackward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const std::array<DimSize_t, 4>&,
+                         std::function<void(const std::array<DimSize_t, 4>&,
                             const PadBorderType,
                             const double,
                             const std::array<DimSize_t, 4> &,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class PadImpl2D_cpu : public OperatorImpl {
 public:
@@ -82,7 +88,7 @@ public:
         return std::make_unique<PadImpl2D_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<Pad_ProdConso_cpu>(mOp); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/PowImpl.hpp b/include/aidge/backend/cpu/operator/PowImpl.hpp
index 514e63af..120ca246 100644
--- a/include/aidge/backend/cpu/operator/PowImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PowImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class PowImplForward_cpu
-    : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> {
+    : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)>> {
 };
 class PowImplBackward_cpu
-    : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
+    : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)>> {
 };
 
 class PowImpl_cpu : public OperatorImpl {
@@ -39,7 +39,7 @@ public:
         return std::make_unique<PowImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
     void backward() override;
 };
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl.hpp b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
index e2ebf446..87cfd797 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
@@ -17,39 +17,63 @@
 #include <tuple>    // std::tuple
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-// class ReLU_Op;
-
-// compute kernel registry for forward and backward
-class ReLUImplForward_cpu
-    : public Registrable<ReLUImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
-};
-class ReLUImplBackward_cpu
-    : public Registrable<ReLUImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
-};
-
-class ReLUImpl_cpu : public OperatorImpl {
-public:
-    ReLUImpl_cpu(const ReLU_Op& op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<ReLUImpl_cpu> create(const ReLU_Op& op) {
-        return std::make_unique<ReLUImpl_cpu>(op);
-    }
+using ReLUImpl_cpu = OperatorImpl_cpu<ReLU_Op,
+    void(const std::size_t, const void*, void*),
+    void(const std::size_t, const void*, const void*, void*)>;
+
+// class ReLUImpl_cpu : public OperatorImpl_cpu<ReLU_Op,
+//     void(const std::size_t, const void*, void*),
+//     void(const std::size_t, const void*, const void*, void*)>
+// {
+// public:
+//     void forward() override;
+//     void backward() override;
+// };
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+template <class I, class O>
+void ReLUImpl_cpu_forward_kernel(std::size_t inputLength,
+                                     const void* input_,
+                                     void* output_) {
+
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
+//#pragma omp parallel for if (inputLength > 1024)
+    for (std::size_t i = 0; i < inputLength; ++i) {
+        output[i] = (input[i] > 0) ? input[i] : 0;
+    }
+}
 
-    void backward() override final;
-};
+template <class I, class GI, class GO>
+void ReLUImpl_cpu_backward_kernel(const std::size_t inputLength,
+                                  const void* input_, const void* grad_output_,
+                                  void* grad_input_) {
+    const I* input = static_cast<const I*>(input_);
+    const GO* grad_output = static_cast<const GO*>(grad_output_);
+    GI* grad_input = static_cast<GI*>(grad_input_);
+    for (std::size_t i = 0; i < inputLength; ++i) {
+        grad_input[i] = (input[i] > 0) ? grad_output[i] : 0;
+    }
+}
 
 namespace {
 static Registrar<ReLU_Op> registrarReLUImpl_cpu("cpu", Aidge::ReLUImpl_cpu::create);
+
+static Registrar<ReLUImpl_cpu> registrarReLUImpl_cpu_float(
+    {DataType::Float32},
+    {ProdConso::inPlaceModel, Aidge::ReLUImpl_cpu_forward_kernel<float, float>, Aidge::ReLUImpl_cpu_backward_kernel<float, float, float>});
+static Registrar<ReLUImpl_cpu> registrarReLUImpl_cpu_double(
+    {DataType::Float64},
+    {ProdConso::inPlaceModel, Aidge::ReLUImpl_cpu_forward_kernel<double, double>, Aidge::ReLUImpl_cpu_backward_kernel<double, double, double>});
+static Registrar<ReLUImpl_cpu> registrarReLUImpl_cpu_int(
+    {DataType::Int32},
+    {ProdConso::inPlaceModel, Aidge::ReLUImpl_cpu_forward_kernel<int, int>, Aidge::ReLUImpl_cpu_backward_kernel<int, int, int>});
 }
 }  // namespace Aidge
 
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp
deleted file mode 100644
index 1bd932e4..00000000
--- a/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp
+++ /dev/null
@@ -1,46 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CPU_OPERATOR_RELUIMPL_BACKWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_RELUIMPL_BACKWARD_KERNEL_H_
-
-#include <cstddef>  // std::size_t
-
-#include "aidge/backend/cpu/operator/ReLUImpl.hpp"
-#include "aidge/utils/Registrar.hpp"
-
-namespace Aidge {
-template <class I, class GI, class GO>
-void ReLUImpl_cpu_backward_kernel(const std::size_t inputLenght,
-                                  const void* input_, const void* grad_output_,
-				  void* grad_input_) {
-    const I* input = static_cast<const I*>(input_);
-    const GO* grad_output = static_cast<const GO*>(grad_output_);
-    GI* grad_input = static_cast<GI*>(grad_input_);
-    for (std::size_t i = 0; i < inputLenght; ++i) {
-        grad_input[i] = (input[i] > 0) ? grad_output[i] : 0;
-    }
-}
-
-namespace {
-static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Float32(
-    {DataType::Float32, DataType::Float32, DataType::Float32},
-    Aidge::ReLUImpl_cpu_backward_kernel<float, float, float>);
-static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Int32(
-    {DataType::Int32, DataType::Int32, DataType::Int32},
-    Aidge::ReLUImpl_cpu_backward_kernel<int, int, int>);
-static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Float64(
-    {DataType::Float64, DataType::Float64, DataType::Float64},
-    Aidge::ReLUImpl_cpu_backward_kernel<double, double, double>);
-}  // namespace
-}  // namespace Aidge
-
-#endif /* AIDGE_CPU_OPERATOR_RELUIMPL_BACKWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp
deleted file mode 100644
index af9c6559..00000000
--- a/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CPU_OPERATOR_RELUIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_RELUIMPL_FORWARD_KERNEL_H_
-
-#include "aidge/utils/Registrar.hpp"
-
-#include "aidge/backend/cpu/operator/ReLUImpl.hpp"
-
-namespace Aidge {
-template <class I, class O>
-void ReLUImpl_cpu_forward_kernel(std::size_t inputLenght,
-                                     const void* input_,
-                                     void* output_) {
-
-    const I* input = static_cast<const I*>(input_);
-    O* output = static_cast<O*>(output_);
-
-//#pragma omp parallel for if (inputLenght > 1024)
-    for (std::size_t i = 0; i < inputLenght; ++i) {
-        output[i] = (input[i] > 0) ? input[i] : 0;
-    }
-}
-
-namespace {
-static Registrar<ReLUImplForward_cpu> registrarReLUImplForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::ReLUImpl_cpu_forward_kernel<float, float>);
-static Registrar<ReLUImplForward_cpu> registrarReLUImplForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::ReLUImpl_cpu_forward_kernel<int, int>);
-static Registrar<ReLUImplForward_cpu> registrarReLUImplForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::ReLUImpl_cpu_forward_kernel<double, double>);
-}  // namespace
-}  // namespace Aidge
-
-#endif /* AIDGE_CPU_OPERATOR_RELUIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
index 8d784c38..075dee20 100644
--- a/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
@@ -29,19 +29,19 @@ namespace Aidge {
 class ReduceMeanImplForward_cpu
     : public Registrable<ReduceMeanImplForward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::vector<std::int32_t>&,
+                        std::function<void(const std::vector<std::int32_t>&,
                             DimSize_t,
                             const std::vector<DimSize_t>&,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 class ReduceMeanImpl1DBackward_cpu
     : public Registrable<ReduceMeanImpl1DBackward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::vector<std::int32_t>&,
+                        std::function<void(const std::vector<std::int32_t>&,
                             DimSize_t,
                             const std::vector<DimSize_t>&,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class ReduceMeanImpl_cpu : public OperatorImpl {
    public:
@@ -60,30 +60,30 @@ class ReduceMeanImpl_cpu : public OperatorImpl {
 // class ReduceMeanImpl1DForward_cpu
 //     : public Registrable<ReduceMeanImpl1DForward_cpu,
 //                          std::tuple<DataType, DataType>,
-//                          void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+//                          std::function<void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)>> {};
 // class ReduceMeanImpl1DBackward_cpu
 //     : public Registrable<ReduceMeanImpl1DBackward_cpu,
 //                          std::tuple<DataType, DataType>,
-//                          void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
+//                          std::function<void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)>> {};
 
 // // DIM 2
 // class ReduceMeanImpl2DForward_cpu
 //     : public Registrable<ReduceMeanImpl2DForward_cpu,
 //                          std::tuple<DataType, DataType>,
-//                          void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+//                          std::function<void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)>> {};
 // class ReduceMeanImpl2DBackward_cpu
 //     : public Registrable<ReduceMeanImpl2DBackward_cpu,
 //                          std::tuple<DataType, DataType>,
-//                          void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
+//                          std::function<void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)>> {};
 // // DIM 3
 // class ReduceMeanImpl3DForward_cpu
 //     : public Registrable<ReduceMeanImpl3DForward_cpu,
 //                          std::tuple<DataType, DataType>,
-//                          void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+//                          std::function<void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)>> {};
 // class ReduceMeanImpl3DBackward_cpu
 //     : public Registrable<ReduceMeanImpl3DBackward_cpu,
 //                          std::tuple<DataType, DataType>,
-//                          void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+//                          std::function<void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)>> {};
 
 // class ReduceMeanImpl1D_cpu : public OperatorImpl {
 //    public:
diff --git a/include/aidge/backend/cpu/operator/ScalingImpl.hpp b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
index 85901692..ca53f2f1 100644
--- a/include/aidge/backend/cpu/operator/ScalingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
@@ -28,21 +28,21 @@ namespace Aidge {
 class ScalingImplForward_cpu
     : public Registrable<ScalingImplForward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const float,
+                        std::function<void(const float,
                             const std::size_t,
                             const bool,
                             std::size_t,
                             const void*,
-                            void*)> {};
+                            void*)>> {};
 class ScalingImplBackward_cpu
     : public Registrable<ScalingImplBackward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const float,
+                        std::function<void(const float,
                             const std::size_t,
                             const bool,
                             std::size_t,
                             const void*,
-                            void*)> {};
+                            void*)>> {};
 
 class ScalingImpl_cpu : public OperatorImpl {
 public:
@@ -52,7 +52,7 @@ public:
         return std::make_unique<ScalingImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
index 34340e61..b8a9d3bf 100644
--- a/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class SigmoidImplForward_cpu
-    : public Registrable<SigmoidImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<SigmoidImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
 };
 class SigmoidImplBackward_cpu
-    : public Registrable<SigmoidImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<SigmoidImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::size_t, const void*, const void*, void*)>> {
 };
 
 class SigmoidImpl_cpu : public OperatorImpl {
@@ -39,7 +39,7 @@ public:
         return std::make_unique<SigmoidImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
 	
     void forward() override final;
 
diff --git a/include/aidge/backend/cpu/operator/SliceImpl.hpp b/include/aidge/backend/cpu/operator/SliceImpl.hpp
index 61aed155..0f67f8c5 100644
--- a/include/aidge/backend/cpu/operator/SliceImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SliceImpl.hpp
@@ -29,23 +29,23 @@ namespace Aidge {
 class SliceImplForward_cpu
     : public Registrable<SliceImplForward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::vector<std::int64_t>&,
+                        std::function<void(const std::vector<std::int64_t>&,
                             const std::vector<std::int64_t>&,
                             const std::vector<std::int8_t>&,
                             const std::vector<std::int64_t>&,
                             const std::vector<DimSize_t>&,
                             const void*,
-                            void*)> {};
+                            void*)>> {};
 class SliceImplBackward_cpu
     : public Registrable<SliceImplBackward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::vector<std::int64_t>&,
+                        std::function<void(const std::vector<std::int64_t>&,
                             const std::vector<std::int64_t>&,
                             const std::vector<std::int8_t>&,
                             const std::vector<std::int64_t>&,
                             const std::vector<DimSize_t>&,
                             const void*,
-                            void*)> {};
+                            void*)>> {};
 
 class SliceImpl_cpu : public OperatorImpl {
 public:
@@ -55,7 +55,7 @@ public:
         return std::make_unique<SliceImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
index 2b2fab48..a09261d0 100644
--- a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class SoftmaxImplForward_cpu
-    : public Registrable<SoftmaxImplForward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const std::vector<DimSize_t>&, const void*, void*)> {
+    : public Registrable<SoftmaxImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(std::size_t, const std::vector<DimSize_t>&, const void*, void*)>> {
 };
 class SoftmaxImplBackward_cpu
-    : public Registrable<SoftmaxImplBackward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const std::vector<DimSize_t>&, const void*, void*)> {
+    : public Registrable<SoftmaxImplBackward_cpu, std::tuple<DataType, DataType>, std::function<void(std::size_t, const std::vector<DimSize_t>&, const void*, void*)>> {
 };
 
 class SoftmaxImpl_cpu : public OperatorImpl {
@@ -39,7 +39,7 @@ public:
         return std::make_unique<SoftmaxImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/SqrtImpl.hpp b/include/aidge/backend/cpu/operator/SqrtImpl.hpp
index 1691d951..5764fea4 100644
--- a/include/aidge/backend/cpu/operator/SqrtImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SqrtImpl.hpp
@@ -26,10 +26,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class SqrtImplForward_cpu
-    : public Registrable<SqrtImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<SqrtImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
 };
 class SqrtImplBackward_cpu
-    : public Registrable<SqrtImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<SqrtImplBackward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
 };
 
 class SqrtImpl_cpu : public OperatorImpl {
@@ -40,7 +40,7 @@ public:
         return std::make_unique<SqrtImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
 
     void forward() override final;
 
diff --git a/include/aidge/backend/cpu/operator/SubImpl.hpp b/include/aidge/backend/cpu/operator/SubImpl.hpp
index 15c028ae..6f9b9a6d 100644
--- a/include/aidge/backend/cpu/operator/SubImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SubImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class SubImplForward_cpu
-    : public Registrable<SubImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> {
+    : public Registrable<SubImplForward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)>> {
 };
 class SubImplBackward_cpu
-    : public Registrable<SubImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
+    : public Registrable<SubImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)>> {
 };
 
 class SubImpl_cpu : public OperatorImpl {
@@ -39,7 +39,7 @@ public:
         return std::make_unique<SubImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/TanhImpl.hpp b/include/aidge/backend/cpu/operator/TanhImpl.hpp
index 0bf851e7..09864d3e 100644
--- a/include/aidge/backend/cpu/operator/TanhImpl.hpp
+++ b/include/aidge/backend/cpu/operator/TanhImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class TanhImplForward_cpu
-    : public Registrable<TanhImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<TanhImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
 };
 class TanhImplBackward_cpu
-    : public Registrable<TanhImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<TanhImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::size_t, const void*, const void*, void*)>> {
 };
 
 class TanhImpl_cpu : public OperatorImpl {
@@ -39,7 +39,7 @@ public:
         return std::make_unique<TanhImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
 	
     void forward() override final;
 
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index d6d75a60..7074546f 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -22,11 +22,6 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
-Aidge::Elts_t  Aidge::AddImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void  Aidge::AddImpl_cpu::forward() {
     const auto& opTensor = static_cast<const OperatorTensor&>(mOp);
     AIDGE_ASSERT(opTensor.getInput(0)->hasImpl(), "cannot run Add forward because the 0-th input has no implementation.");
diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp
index feaa7e67..798ca01b 100644
--- a/src/operator/AvgPoolingImpl.cpp
+++ b/src/operator/AvgPoolingImpl.cpp
@@ -21,11 +21,6 @@
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::AvgPoolingImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const AvgPooling_Op<2>&>(mOp);
     assert(op_.getInput(0) && "missing input #0");
diff --git a/src/operator/BatchNormImpl.cpp b/src/operator/BatchNormImpl.cpp
index 3046eea9..8291dded 100644
--- a/src/operator/BatchNormImpl.cpp
+++ b/src/operator/BatchNormImpl.cpp
@@ -21,11 +21,6 @@
 
 #include "aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp"
 
-Aidge::Elts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::BatchNormImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const BatchNorm_Op<2>&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0 for BatchNorm Operator");
diff --git a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp
index 591e8a06..ba3d4d8e 100644
--- a/src/operator/ConvDepthWiseImpl.cpp
+++ b/src/operator/ConvDepthWiseImpl.cpp
@@ -21,12 +21,6 @@
 #include "aidge/utils/Log.hpp"
 #include "aidge/utils/Types.h"
 
-
-Aidge::Elts_t Aidge::ConvDepthWiseImpl1D_cpu::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::ConvDepthWiseImpl1D_cpu::forward() {
     const auto& op_ = dynamic_cast<const ConvDepthWise_Op<1>&>(mOp);
 
@@ -75,11 +69,6 @@ void Aidge::ConvDepthWiseImpl1D_cpu::forward() {
             );
 }
 
-Aidge::Elts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const ConvDepthWise_Op<2>&>(mOp);
 
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index 0be31bef..3a6b331b 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -22,11 +22,6 @@
 #include "aidge/operator/Conv.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::ConvImpl1D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::ConvImpl1D_cpu::forward() {
     const auto& op_ = static_cast<const Conv_Op<1>&>(mOp);
 
@@ -75,11 +70,6 @@ AIDGE_ASSERT(op_.getInput(0), "missing input #0 in Conv Operator.");
             );
 }
 
-Aidge::Elts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::ConvImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const Conv_Op<2>&>(mOp);
 
diff --git a/src/operator/DivImpl.cpp b/src/operator/DivImpl.cpp
index cfd74be4..3869b3a7 100644
--- a/src/operator/DivImpl.cpp
+++ b/src/operator/DivImpl.cpp
@@ -19,11 +19,6 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::DivImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::DivImpl_cpu::forward() {
     // Find the correct kernel type
     // auto kernelFunc = Registrar<DivImplForward_cpu>::create({
diff --git a/src/operator/ErfImpl.cpp b/src/operator/ErfImpl.cpp
index ace09846..b32e19b6 100644
--- a/src/operator/ErfImpl.cpp
+++ b/src/operator/ErfImpl.cpp
@@ -19,11 +19,6 @@
 #include "aidge/operator/Erf.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::ErfImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::ErfImpl_cpu::forward() {
     const Erf_Op& op = static_cast<const Erf_Op&>(mOp);
 
diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp
index 9d4f2a7e..e7088742 100644
--- a/src/operator/LeakyReLUImpl.cpp
+++ b/src/operator/LeakyReLUImpl.cpp
@@ -22,12 +22,6 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/Registrar.hpp"
 
-
-Aidge::Elts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::LeakyReLUImpl_cpu::forward() {
     const LeakyReLU_Op& op_ = dynamic_cast<const LeakyReLU_Op&>(mOp);
 
diff --git a/src/operator/LnImpl.cpp b/src/operator/LnImpl.cpp
index 12885a94..ec1d5cf3 100644
--- a/src/operator/LnImpl.cpp
+++ b/src/operator/LnImpl.cpp
@@ -23,11 +23,6 @@
 #include "aidge/backend/cpu/operator/LnImpl_forward_kernels.hpp"
 #include "aidge/backend/cpu/operator/LnImpl_backward_kernels.hpp"
 
-Aidge::Elts_t Aidge::LnImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::LnImpl_cpu::forward() {
     const Ln_Op& op_ = static_cast<const Ln_Op&>(mOp);
 	std::shared_ptr<Tensor> in0 = op_.getInput(0);
diff --git a/src/operator/MaxPoolingImpl.cpp b/src/operator/MaxPoolingImpl.cpp
index 2e6d67ab..ec21feb9 100644
--- a/src/operator/MaxPoolingImpl.cpp
+++ b/src/operator/MaxPoolingImpl.cpp
@@ -19,12 +19,6 @@
 #include "aidge/utils/Log.hpp"
 #include "aidge/utils/Types.h"
 
-
-Aidge::Elts_t Aidge::MaxPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::MaxPoolingImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const MaxPooling_Op<2>&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0 in MaxPooling Operator.");
diff --git a/src/operator/MulImpl.cpp b/src/operator/MulImpl.cpp
index d7feb9b7..c6a820e2 100644
--- a/src/operator/MulImpl.cpp
+++ b/src/operator/MulImpl.cpp
@@ -23,11 +23,6 @@
 #include "aidge/backend/cpu/operator/MulImpl.hpp"
 #include "aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp"
 
-Aidge::Elts_t Aidge::MulImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::MulImpl_cpu::forward() {
     // Find the correct kernel type
     auto kernelFunc = Registrar<MulImplForward_cpu>::create({
diff --git a/src/operator/PadImpl.cpp b/src/operator/PadImpl.cpp
index b4b52d6b..7e5364e6 100644
--- a/src/operator/PadImpl.cpp
+++ b/src/operator/PadImpl.cpp
@@ -18,7 +18,7 @@
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
 #include "aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp"
 
-Aidge::Elts_t Aidge::PadImpl1D_cpu::getNbRequiredProtected(Aidge::IOIndex_t inputIdx) const {
+Aidge::Elts_t Aidge::Pad_ProdConso_cpu::getNbRequiredProtected(Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(inputIdx == 0, "input index out of range."
         "{} Operator has only one input", mOp.type());
     (void) inputIdx;
@@ -49,18 +49,6 @@ void Aidge::PadImpl1D_cpu::forward() {
                 getCPUPtr(mOp.getRawOutput(0)));
 }
 
-Aidge::Elts_t Aidge::PadImpl2D_cpu::getNbRequiredProtected(Aidge::IOIndex_t inputIdx) const {
-    AIDGE_ASSERT(inputIdx == 0, "input index out of range."
-        "{} Operator has only one input", mOp.type());
-    (void) inputIdx;
-
-    // Padding cannot be in-place!
-    // We must ensure that we do not override data that has not been consummed yet.
-    const auto inputSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size();
-    const auto outputSize = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size();
-    return Elts_t::DataElts(outputSize - inputSize);
-}
-
 void Aidge::PadImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const Pad_Op<2>&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0 in Pad Operator.");
diff --git a/src/operator/PowImpl.cpp b/src/operator/PowImpl.cpp
index 811d1380..97f510b9 100644
--- a/src/operator/PowImpl.cpp
+++ b/src/operator/PowImpl.cpp
@@ -23,11 +23,6 @@
 #include "aidge/backend/cpu/operator/PowImpl.hpp"
 #include "aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp"
 
-Aidge::Elts_t Aidge::PowImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::PowImpl_cpu::forward() {
     // Find the correct kernel type
     auto kernelFunc = Registrar<PowImplForward_cpu>::create({
diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp
index 4a0fb9f5..5f417047 100644
--- a/src/operator/ReLUImpl.cpp
+++ b/src/operator/ReLUImpl.cpp
@@ -19,14 +19,8 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
-#include "aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp"
-#include "aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp"
-
-Aidge::Elts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
 
+template <>
 void Aidge::ReLUImpl_cpu::forward() {
 	const ReLU_Op& op_ = dynamic_cast<const ReLU_Op&>(mOp);
     std::shared_ptr<Tensor> in0 = op_.getInput(0);
@@ -34,16 +28,15 @@ void Aidge::ReLUImpl_cpu::forward() {
     AIDGE_ASSERT(in0, "missing input #0");
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<ReLUImplForward_cpu>::create({
-        in0->dataType(),
-	    out0->dataType()});
+    const auto impl = Registrar<ReLUImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    kernelFunc(in0->size(),
+    impl.forward(in0->size(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawOutput(0)));
 }
 
+template <>
 void Aidge::ReLUImpl_cpu::backward() {
     const ReLU_Op& op_ = dynamic_cast<const ReLU_Op&>(mOp);
     std::shared_ptr<Tensor> in0  = op_.getInput(0);
@@ -53,12 +46,8 @@ void Aidge::ReLUImpl_cpu::backward() {
     AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type());
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<ReLUImplBackward_cpu>::create({
-	in0->dataType(),
-        gra_int0->dataType(),
-	gra_out0->dataType()
-    });
+    const auto impl = Registrar<ReLUImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    kernelFunc(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
+    impl.backward(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
 }
diff --git a/src/operator/ScalingImpl.cpp b/src/operator/ScalingImpl.cpp
index db467083..30e0a30f 100644
--- a/src/operator/ScalingImpl.cpp
+++ b/src/operator/ScalingImpl.cpp
@@ -21,11 +21,6 @@
 #include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
-Aidge::Elts_t Aidge::ScalingImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::ScalingImpl_cpu::forward() {
     const auto& op_ = dynamic_cast<const Scaling_Op&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0 in Scaling Operator.");
diff --git a/src/operator/SigmoidImpl.cpp b/src/operator/SigmoidImpl.cpp
index ad69935c..7e00f6f1 100644
--- a/src/operator/SigmoidImpl.cpp
+++ b/src/operator/SigmoidImpl.cpp
@@ -23,11 +23,6 @@
 #include "aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp"
 #include "aidge/backend/cpu/operator/SigmoidImpl_backward_kernels.hpp"
 
-Aidge::Elts_t Aidge::SigmoidImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::SigmoidImpl_cpu::forward() {
 	const Sigmoid_Op& op_ = dynamic_cast<const Sigmoid_Op&>(mOp);
     std::shared_ptr<Tensor> in0 = op_.getInput(0);
diff --git a/src/operator/SliceImpl.cpp b/src/operator/SliceImpl.cpp
index 8ffe4dcd..a7664262 100644
--- a/src/operator/SliceImpl.cpp
+++ b/src/operator/SliceImpl.cpp
@@ -19,11 +19,6 @@
 #include "aidge/utils/Log.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::SliceImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::SliceImpl_cpu::forward() {
     const auto& op_ = dynamic_cast<const Slice_Op&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0 in Slice Operator.");
diff --git a/src/operator/SoftmaxImpl.cpp b/src/operator/SoftmaxImpl.cpp
index 5bc3699e..07eaec6c 100644
--- a/src/operator/SoftmaxImpl.cpp
+++ b/src/operator/SoftmaxImpl.cpp
@@ -22,11 +22,6 @@
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 #include "aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp"
 
-Aidge::Elts_t Aidge::SoftmaxImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::SoftmaxImpl_cpu::forward() {
     const auto& op_ = dynamic_cast<const Softmax_Op&>(mOp);
     AIDGE_ASSERT(!op_.getInput(0)->empty(), "Softmax input empty");
diff --git a/src/operator/SqrtImpl.cpp b/src/operator/SqrtImpl.cpp
index edb8858f..128135b2 100644
--- a/src/operator/SqrtImpl.cpp
+++ b/src/operator/SqrtImpl.cpp
@@ -22,11 +22,6 @@
 #include "aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp"
 #include "aidge/backend/cpu/operator/SqrtImpl_backward_kernels.hpp"
 
-Aidge::Elts_t Aidge::SqrtImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::SqrtImpl_cpu::forward() {
     std::shared_ptr<Tensor> in0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0));
     std::shared_ptr<Tensor> out0 = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0));
diff --git a/src/operator/SubImpl.cpp b/src/operator/SubImpl.cpp
index ffddb59e..24f2c982 100644
--- a/src/operator/SubImpl.cpp
+++ b/src/operator/SubImpl.cpp
@@ -23,11 +23,6 @@
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
 #include "aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp"
 
-Aidge::Elts_t Aidge::SubImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::SubImpl_cpu::forward() {
 
     // Find the correct kernel type
diff --git a/src/operator/TanhImpl.cpp b/src/operator/TanhImpl.cpp
index a2469ed9..9fe054f1 100644
--- a/src/operator/TanhImpl.cpp
+++ b/src/operator/TanhImpl.cpp
@@ -23,11 +23,6 @@
 #include "aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp"
 #include "aidge/backend/cpu/operator/TanhImpl_backward_kernels.hpp"
 
-Aidge::Elts_t Aidge::TanhImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::TanhImpl_cpu::forward() {
 	const Tanh_Op& op_ = dynamic_cast<const Tanh_Op&>(mOp);
     std::shared_ptr<Tensor> in0 = op_.getInput(0);
-- 
GitLab