diff --git a/include/aidge/backend/cpu/operator/AddImpl.hpp b/include/aidge/backend/cpu/operator/AddImpl.hpp
index 7a1497a2f4a2ae0e6005897ae504502505bbe60a..f71e3fcfd448e5c8f2d85b19cd6455160370bd59 100644
--- a/include/aidge/backend/cpu/operator/AddImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl.hpp
@@ -26,10 +26,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class AddImplForward_cpu
-    : public Registrable<AddImplForward_cpu, std::tuple<DataType, DataType>, void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)> {};
+    : public Registrable<AddImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)>> {};
 
 class AddImplBackward_cpu
-    : public Registrable<AddImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)> {};
+    : public Registrable<AddImplBackward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::vector<const void*>, const std::vector<std::vector<std::size_t>>&, const std::size_t, const std::vector<std::size_t>&, void*)>> {};
 
 
 class AddImpl_cpu : public OperatorImpl {
@@ -40,7 +40,7 @@ public:
         return std::make_unique<AddImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
index 12a5dc334619c16e6ad3a77f0cd76f4db7a87b77..7fbd7cd9d342c693ca32d1e463a81bd5c94b22ee 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
@@ -30,19 +30,19 @@ namespace Aidge {
 class AvgPoolingImpl2DForward_cpu
     : public Registrable<AvgPoolingImpl2DForward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::array<DimSize_t, 2>&,
+                        std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 4>&,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 class AvgPoolingImpl2DBackward_cpu
     : public Registrable<AvgPoolingImpl2DBackward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::array<DimSize_t, 2>&,
+                        std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 4>&,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class AvgPoolingImpl2D_cpu : public OperatorImpl {
 public:
@@ -52,7 +52,7 @@ public:
         return std::make_unique<AvgPoolingImpl2D_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
index 93bdab2d3f37e3bd8dc1e68ab68a05de8c8015ed..45860e331346c34b525e21b02e45029d003c9f64 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
@@ -30,7 +30,7 @@ namespace Aidge {
 class BatchNormImpl2DForward_cpu
     : public Registrable<BatchNormImpl2DForward_cpu,
                          std::tuple<DataType, DataType, DataType>,
-                         void(float,
+                         std::function<void(float,
                             float,
                             const std::array<DimSize_t, 4> &,
                             const void *,
@@ -39,11 +39,11 @@ class BatchNormImpl2DForward_cpu
                             void *,
                             void *,
                             void *,
-                            const bool)> {};
+                            const bool)>> {};
 class BatchNormImpl2DBackward_cpu
     : public Registrable<BatchNormImpl2DBackward_cpu,
                          std::tuple<DataType, DataType, DataType>,
-                         void(float,
+                         std::function<void(float,
                             float,
                             const std::array<DimSize_t, 4> &,
                             const void *,
@@ -51,7 +51,7 @@ class BatchNormImpl2DBackward_cpu
                             const void *,
                             void *,
                             void *,
-                            void *)> {};
+                            void *)>> {};
 
 class BatchNormImpl2D_cpu : public OperatorImpl {
 public:
@@ -61,7 +61,7 @@ public:
         return std::make_unique<BatchNormImpl2D_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
index ec886a310dd2edc616ced6ee447665eab3ce301a..5e59d502edaa74a03985cc30e6975034eb309e14 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
@@ -29,14 +29,14 @@ namespace Aidge {
 class ConvDepthWiseImpl1DForward_cpu
     : public Registrable<ConvDepthWiseImpl1DForward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const std::array<DimSize_t, 1>&,
+                         std::function<void(const std::array<DimSize_t, 1>&,
                             const std::array<DimSize_t, 1>&,
                             const std::array<DimSize_t, 1>&,
                             const std::array<DimSize_t, 3>&,
                             const void *,
                             const void *,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class ConvDepthWiseImpl1D_cpu : public OperatorImpl {
 public:
@@ -46,7 +46,7 @@ public:
         return std::make_unique<ConvDepthWiseImpl1D_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
@@ -59,18 +59,18 @@ static Registrar<ConvDepthWise_Op<1>> registrarConvDepthWiseImpl1D_cpu("cpu", Ai
 class ConvDepthWiseImpl2DForward_cpu
     : public Registrable<ConvDepthWiseImpl2DForward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const std::array<DimSize_t, 2>&,
+                         std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 4> &,
                             const void *,
                             const void *,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 class ConvDepthWiseImpl2DBackward_cpu
     : public Registrable<ConvDepthWiseImpl2DBackward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const std::array<DimSize_t, 2>&,
+                         std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             bool,
@@ -78,7 +78,7 @@ class ConvDepthWiseImpl2DBackward_cpu
                             const void *,
                             const void *,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
 public:
@@ -88,7 +88,7 @@ public:
         return std::make_unique<ConvDepthWiseImpl2D_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp
index d7be46c251a82d1b631f4ad50e7175fa2f896d03..5cd1c804dcc53ae851b7b19bf4f5bb3c3d83fa6f 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp
@@ -31,7 +31,7 @@ namespace Aidge {
 class ConvImpl1DForward_cpu
     : public Registrable<ConvImpl1DForward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const std::array<DimSize_t, 1>&,
+                         std::function<void(const std::array<DimSize_t, 1>&,
                             const std::array<DimSize_t, 1>&,
                             const std::array<DimSize_t, 1>&,
                             const std::array<DimSize_t, 3> &,
@@ -39,7 +39,7 @@ class ConvImpl1DForward_cpu
                             const void *,
                             const void *,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class ConvImpl1D_cpu : public OperatorImpl {
    public:
@@ -50,7 +50,7 @@ class ConvImpl1D_cpu : public OperatorImpl {
     }
 
    public:
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
@@ -63,7 +63,7 @@ static Registrar<Conv_Op<1>> registrarConvImpl1D_cpu("cpu", Aidge::ConvImpl1D_cp
 class ConvImpl2DForward_cpu
     : public Registrable<ConvImpl2DForward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const std::array<DimSize_t, 2>&,
+                         std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 4> &,
@@ -71,11 +71,11 @@ class ConvImpl2DForward_cpu
                             const void *,
                             const void *,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 class ConvImpl2DBackward_cpu
     : public Registrable<ConvImpl2DBackward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const std::array<DimSize_t, 2>&,
+                         std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             bool,
@@ -83,7 +83,7 @@ class ConvImpl2DBackward_cpu
                             const void *,
                             const void *,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class ConvImpl2D_cpu : public OperatorImpl {
    public:
@@ -94,7 +94,7 @@ class ConvImpl2D_cpu : public OperatorImpl {
     }
 
    public:
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/DivImpl.hpp b/include/aidge/backend/cpu/operator/DivImpl.hpp
index 3a19d7303464e3543bd1ce83e334c4a6bdb713a2..c969e19e051dd7914b0ee9a54c12dbb9c15af18f 100644
--- a/include/aidge/backend/cpu/operator/DivImpl.hpp
+++ b/include/aidge/backend/cpu/operator/DivImpl.hpp
@@ -25,11 +25,11 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class DivImplForward_cpu
-    // : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> {
-    : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const std::size_t, const std::size_t, const void*, const void*,void*)> {
+    // : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)>> {
+    : public Registrable<DivImplForward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::size_t, const std::size_t, const std::size_t, const void*, const void*,void*)>> {
 };
 class DivImplBackward_cpu
-    : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
+    : public Registrable<DivImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)>> {
 };
 
 class DivImpl_cpu : public OperatorImpl {
@@ -40,7 +40,7 @@ public:
         return std::make_unique<DivImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
 
     void forward() override final;
 };
diff --git a/include/aidge/backend/cpu/operator/ErfImpl.hpp b/include/aidge/backend/cpu/operator/ErfImpl.hpp
index 6864803a542e4beed0259be9c4722d4215bec449..11aed23d585739f3ae56b059bd0974a525736c40 100644
--- a/include/aidge/backend/cpu/operator/ErfImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ErfImpl.hpp
@@ -24,10 +24,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class ErfImplForward_cpu
-    : public Registrable<ErfImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<ErfImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
 };
 class ErfImplBackward_cpu
-    : public Registrable<ErfImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<ErfImplBackward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
 };
 
 class ErfImpl_cpu : public OperatorImpl {
@@ -38,7 +38,7 @@ public:
         return std::make_unique<ErfImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/FCImpl.hpp b/include/aidge/backend/cpu/operator/FCImpl.hpp
index f21cd0ff330f61b942eb55f036c7b23458a5959a..fefd88b414aa103435329362ec75bd5ff8be6714 100644
--- a/include/aidge/backend/cpu/operator/FCImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl.hpp
@@ -30,19 +30,19 @@ class FCImplForward_cpu : public Registrable<FCImplForward_cpu,
                                                         DataType,
                                                         DataType,
                                                         DataType>,
-                                             void(const DimSize_t,
+                                             std::function<void(const DimSize_t,
                                                 const DimSize_t,
                                                 const DimSize_t,
                                                 const void *,
                                                 const void *,
                                                 const void *,
-                                                void *)> {};
+                                                void *)>> {};
 class FCImplBackward_cpu : public Registrable<FCImplBackward_cpu,
                                               std::tuple<DataType,
                                                          DataType,
                                                          DataType,
                                                          DataType>,
-                                              void(const DimSize_t,
+                                              std::function<void(const DimSize_t,
                                                 const DimSize_t,
                                                 const DimSize_t,
                                                 const void *,
@@ -50,7 +50,7 @@ class FCImplBackward_cpu : public Registrable<FCImplBackward_cpu,
                                                 const void *,
                                                 void *,
                                                 void *,
-                                                void *)> {};
+                                                void *)>> {};
 
 class FCImpl_cpu : public OperatorImpl {
 public:
diff --git a/include/aidge/backend/cpu/operator/FoldImpl.hpp b/include/aidge/backend/cpu/operator/FoldImpl.hpp
index 61701138b0cc1c7f0b7dcea0609ca0d463137e08..a5b332251457e09d585f04c94d2e668a43f94cca 100644
--- a/include/aidge/backend/cpu/operator/FoldImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FoldImpl.hpp
@@ -27,13 +27,13 @@ namespace Aidge {
 class FoldImpl2DForward_cpu
     : public Registrable<FoldImpl2DForward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const std::array<DimSize_t, 2>&,
+                         std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const std::vector<DimSize_t> &,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class FoldImpl2D_cpu : public OperatorImpl {
 public:
diff --git a/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp b/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp
index 758535de4cc506b8de4adf7004afbbfdd8185941..6ce10711b9a8605bf1d27585be47ec0385bd6151 100644
--- a/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl.hpp
@@ -26,12 +26,12 @@ namespace Aidge {
 class GlobalAveragePoolingImplForward_cpu
     : public Registrable<
           GlobalAveragePoolingImplForward_cpu, std::tuple<DataType, DataType>,
-          void(const std::vector<DimSize_t> &, const void *, void *)> {};
+          std::function<void(const std::vector<DimSize_t> &, const void *, void *)>> {};
 
 class GlobalAveragePoolingImplBackward_cpu
     : public Registrable<
           GlobalAveragePoolingImplBackward_cpu, std::tuple<DataType, DataType>,
-          void(const std::vector<DimSize_t> &, const void *, void *)> {};
+          std::function<void(const std::vector<DimSize_t> &, const void *, void *)>> {};
 
 class GlobalAveragePoolingImpl_cpu : public OperatorImpl {
 public:
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
index c9ad909eee631189a81067eda076c0b8cbb13377..a3e95540af1f8dfd4fa5e74b53c426b51ece4181 100644
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
@@ -27,17 +27,17 @@ namespace Aidge {
 class LeakyReLUImplForward_cpu
     : public Registrable<LeakyReLUImplForward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const float,
+                        std::function<void(const float,
                             std::size_t,
                             const void*,
-                            void*)> {};
+                            void*)>> {};
 class LeakyReLUImplBackward_cpu
     : public Registrable<LeakyReLUImplBackward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const float,
+                        std::function<void(const float,
                             std::size_t,
                             const void*,
-                            void*)> {};
+                            void*)>> {};
 
 class LeakyReLUImpl_cpu : public OperatorImpl {
 public:
@@ -47,7 +47,7 @@ public:
         return std::make_unique<LeakyReLUImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
 
     void forward() override final;
 
diff --git a/include/aidge/backend/cpu/operator/LnImpl.hpp b/include/aidge/backend/cpu/operator/LnImpl.hpp
index faa03855a4f881f2a644ebc4023871b7acd6275c..0c7b9709ece37034713f46c79bfd2df770f36aa7 100755
--- a/include/aidge/backend/cpu/operator/LnImpl.hpp
+++ b/include/aidge/backend/cpu/operator/LnImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class LnImplForward_cpu
-    : public Registrable<LnImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<LnImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
 };
 class LnImplBackward_cpu
-    : public Registrable<LnImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<LnImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::size_t, const void*, const void*, void*)>> {
 };
 
 class LnImpl_cpu : public OperatorImpl {
@@ -39,7 +39,7 @@ public:
         return std::make_unique<LnImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
 	
     void forward() override final;
 
diff --git a/include/aidge/backend/cpu/operator/MatMulImpl.hpp b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
index e4b76d64baadbcb1baa7d24180c4bb13ed47215b..957002dea1d9d94c4f99104bb4cb8afa30d8db7b 100644
--- a/include/aidge/backend/cpu/operator/MatMulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
@@ -26,12 +26,12 @@ namespace Aidge {
 
 class MatMulImplForward_cpu
     : public Registrable<MatMulImplForward_cpu, std::tuple<DataType, DataType>,
-                         void(const std::size_t, const std::size_t, const std::size_t,
-                              const void *, const void *, void *)> {};
+                         std::function<void(const std::size_t, const std::size_t, const std::size_t,
+                              const void *, const void *, void *)>> {};
 class MatMulImplBackward_cpu
     : public Registrable<MatMulImplBackward_cpu, std::tuple<DataType, DataType>,
-                         void(const std::vector<DimSize_t>&, const std::vector<DimSize_t>&,
-                              const void *, const void *, void *)> {};
+                         std::function<void(const std::vector<DimSize_t>&, const std::vector<DimSize_t>&,
+                              const void *, const void *, void *)>> {};
 
 class MatMulImpl_cpu : public OperatorImpl {
 public:
diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
index 4dd30e1fb939837f6861313eda04d7d05f3c8110..c561da6a2c9569515f636968c989b2d06f1f4813 100644
--- a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
@@ -30,21 +30,21 @@ namespace Aidge {
 class MaxPoolingImpl2DForward_cpu
     : public Registrable<MaxPoolingImpl2DForward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::array<DimSize_t, 2>&,
+                        std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const bool,
                             const std::array<DimSize_t, 4> &,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 class MaxPoolingImpl2DBackward_cpu
     : public Registrable<MaxPoolingImpl2DBackward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::array<DimSize_t, 2>&,
+                        std::function<void(const std::array<DimSize_t, 2>&,
                             const std::array<DimSize_t, 2>&,
                             const bool,
                             const std::array<DimSize_t, 4> &,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class MaxPoolingImpl2D_cpu : public OperatorImpl {
 public:
@@ -54,7 +54,7 @@ public:
         return std::make_unique<MaxPoolingImpl2D_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/MulImpl.hpp b/include/aidge/backend/cpu/operator/MulImpl.hpp
index 2d42194c417bd7d57c00f4325a4585cf59d95b24..6c4cd64ceebb4a1385dbd0386b6e5a2599544e6b 100644
--- a/include/aidge/backend/cpu/operator/MulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MulImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class MulImplForward_cpu
-    : public Registrable<MulImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> {
+    : public Registrable<MulImplForward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)>> {
 };
 class MulImplBackward_cpu
-    : public Registrable<MulImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
+    : public Registrable<MulImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)>> {
 };
 
 class MulImpl_cpu : public OperatorImpl {
@@ -39,7 +39,7 @@ public:
         return std::make_unique<MulImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/OperatorImpl.hpp b/include/aidge/backend/cpu/operator/OperatorImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..b11f952e0a9fdd7c3e8793dc23a8f0d1fd3a3cb5
--- /dev/null
+++ b/include/aidge/backend/cpu/operator/OperatorImpl.hpp
@@ -0,0 +1,50 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CPU_OPERATOR_IMPL_H_
+#define AIDGE_CPU_OPERATOR_IMPL_H_
+
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <tuple>    // std::tuple
+#include <vector>
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+namespace Aidge {
+template <class Op, class FwdFunc, class BwdFunc>
+class OperatorImpl_cpu : public OperatorImpl,
+    public Registrable<OperatorImpl_cpu<Op, FwdFunc, BwdFunc>, ImplSpec, Impl<FwdFunc, BwdFunc>>
+{
+public:
+    OperatorImpl_cpu(const Op& op) : OperatorImpl(op, "cpu") {}
+
+    static std::unique_ptr<OperatorImpl_cpu<Op, FwdFunc, BwdFunc>> create(const Op& op) {
+        return std::make_unique<OperatorImpl_cpu<Op, FwdFunc, BwdFunc>>(op);
+    }
+
+    virtual std::shared_ptr<ProdConso> getProdConso() const override {
+        const auto impl = Registrar<OperatorImpl_cpu>::create(getBestMatch(getRequiredSpec()));
+        return impl.prodConso(mOp);
+    }
+
+    virtual std::vector<ImplSpec> getAvailableImplSpecs() const override {
+        return Registrar<OperatorImpl_cpu>::getKeys();
+    }
+
+    void forward() override;
+    void backward() override;
+};
+}  // namespace Aidge
+
+#endif /* AIDGE_CPU_OPERATOR_IMPL_H_ */
diff --git a/include/aidge/backend/cpu/operator/PadImpl.hpp b/include/aidge/backend/cpu/operator/PadImpl.hpp
index c6e41c29fd203fdd80b2acb9ad0dfcac91a0f66c..a49c5ee87c418ba094b5930e995ada669299e1af 100644
--- a/include/aidge/backend/cpu/operator/PadImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PadImpl.hpp
@@ -24,17 +24,23 @@
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 namespace Aidge {
+class Pad_ProdConso_cpu : public ProdConso {
+public:
+    Pad_ProdConso_cpu(const Operator& op): ProdConso(op) {}
+    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+};
+
 // class Pad_Op;
 // compute kernel registry for forward and backward
 class PadImpl1DForward_cpu
     : public Registrable<PadImpl1DForward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const std::array<DimSize_t, 2>&,
+                         std::function<void(const std::array<DimSize_t, 2>&,
                             const PadBorderType,
                             const double,
                             const std::array<DimSize_t, 3> &,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class PadImpl1D_cpu : public OperatorImpl {
 public:
@@ -44,7 +50,7 @@ public:
         return std::make_unique<PadImpl1D_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<Pad_ProdConso_cpu>(mOp); };
     void forward() override;
 };
 
@@ -58,21 +64,21 @@ static Registrar<Pad_Op<1>> registrarPadImpl1D_cpu("cpu", Aidge::PadImpl1D_cpu::
 class PadImpl2DForward_cpu
     : public Registrable<PadImpl2DForward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const std::array<DimSize_t, 4>&,
+                         std::function<void(const std::array<DimSize_t, 4>&,
                             const PadBorderType,
                             const double,
                             const std::array<DimSize_t, 4> &,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 class PadImpl2DBackward_cpu
     : public Registrable<PadImpl2DBackward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const std::array<DimSize_t, 4>&,
+                         std::function<void(const std::array<DimSize_t, 4>&,
                             const PadBorderType,
                             const double,
                             const std::array<DimSize_t, 4> &,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class PadImpl2D_cpu : public OperatorImpl {
 public:
@@ -82,7 +88,7 @@ public:
         return std::make_unique<PadImpl2D_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<Pad_ProdConso_cpu>(mOp); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/PowImpl.hpp b/include/aidge/backend/cpu/operator/PowImpl.hpp
index 514e63af5ae5d1d1d00f7f328f5367df2bfa163d..120ca24695854b3258bdb1484ba82fa3a7c9dd87 100644
--- a/include/aidge/backend/cpu/operator/PowImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PowImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class PowImplForward_cpu
-    : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> {
+    : public Registrable<PowImplForward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)>> {
 };
 class PowImplBackward_cpu
-    : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
+    : public Registrable<PowImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)>> {
 };
 
 class PowImpl_cpu : public OperatorImpl {
@@ -39,7 +39,7 @@ public:
         return std::make_unique<PowImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
     void backward() override;
 };
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl.hpp b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
index e2ebf44616db876b462157db650ff48362dd7bac..87cfd79720caa664add9b5dcfff8313ce5e011cf 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
@@ -17,39 +17,63 @@
 #include <tuple>    // std::tuple
 #include <vector>
 
-#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/cpu/operator/OperatorImpl.hpp"
 #include "aidge/operator/ReLU.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-// class ReLU_Op;
-
-// compute kernel registry for forward and backward
-class ReLUImplForward_cpu
-    : public Registrable<ReLUImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
-};
-class ReLUImplBackward_cpu
-    : public Registrable<ReLUImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
-};
-
-class ReLUImpl_cpu : public OperatorImpl {
-public:
-    ReLUImpl_cpu(const ReLU_Op& op) : OperatorImpl(op, "cpu") {}
-
-    static std::unique_ptr<ReLUImpl_cpu> create(const ReLU_Op& op) {
-        return std::make_unique<ReLUImpl_cpu>(op);
-    }
+using ReLUImpl_cpu = OperatorImpl_cpu<ReLU_Op,
+    void(const std::size_t, const void*, void*),
+    void(const std::size_t, const void*, const void*, void*)>;
+
+// class ReLUImpl_cpu : public OperatorImpl_cpu<ReLU_Op,
+//     void(const std::size_t, const void*, void*),
+//     void(const std::size_t, const void*, const void*, void*)>
+// {
+// public:
+//     void forward() override;
+//     void backward() override;
+// };
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+template <class I, class O>
+void ReLUImpl_cpu_forward_kernel(std::size_t inputLength,
+                                     const void* input_,
+                                     void* output_) {
 
-    void forward() override final;
+    const I* input = static_cast<const I*>(input_);
+    O* output = static_cast<O*>(output_);
+
+//#pragma omp parallel for if (inputLength > 1024)
+    for (std::size_t i = 0; i < inputLength; ++i) {
+        output[i] = (input[i] > 0) ? input[i] : 0;
+    }
+}
 
-    void backward() override final;
-};
+template <class I, class GI, class GO>
+void ReLUImpl_cpu_backward_kernel(const std::size_t inputLength,
+                                  const void* input_, const void* grad_output_,
+                                  void* grad_input_) {
+    const I* input = static_cast<const I*>(input_);
+    const GO* grad_output = static_cast<const GO*>(grad_output_);
+    GI* grad_input = static_cast<GI*>(grad_input_);
+    for (std::size_t i = 0; i < inputLength; ++i) {
+        grad_input[i] = (input[i] > 0) ? grad_output[i] : 0;
+    }
+}
 
 namespace {
 static Registrar<ReLU_Op> registrarReLUImpl_cpu("cpu", Aidge::ReLUImpl_cpu::create);
+
+static Registrar<ReLUImpl_cpu> registrarReLUImpl_cpu_float(
+    {DataType::Float32},
+    {ProdConso::inPlaceModel, Aidge::ReLUImpl_cpu_forward_kernel<float, float>, Aidge::ReLUImpl_cpu_backward_kernel<float, float, float>});
+static Registrar<ReLUImpl_cpu> registrarReLUImpl_cpu_double(
+    {DataType::Float64},
+    {ProdConso::inPlaceModel, Aidge::ReLUImpl_cpu_forward_kernel<double, double>, Aidge::ReLUImpl_cpu_backward_kernel<double, double, double>});
+static Registrar<ReLUImpl_cpu> registrarReLUImpl_cpu_int(
+    {DataType::Int32},
+    {ProdConso::inPlaceModel, Aidge::ReLUImpl_cpu_forward_kernel<int, int>, Aidge::ReLUImpl_cpu_backward_kernel<int, int, int>});
 }
 }  // namespace Aidge
 
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp b/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp
deleted file mode 100644
index 1bd932e43608d98f737cc9046aed74b2fec6abc6..0000000000000000000000000000000000000000
--- a/include/aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp
+++ /dev/null
@@ -1,46 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CPU_OPERATOR_RELUIMPL_BACKWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_RELUIMPL_BACKWARD_KERNEL_H_
-
-#include <cstddef>  // std::size_t
-
-#include "aidge/backend/cpu/operator/ReLUImpl.hpp"
-#include "aidge/utils/Registrar.hpp"
-
-namespace Aidge {
-template <class I, class GI, class GO>
-void ReLUImpl_cpu_backward_kernel(const std::size_t inputLenght,
-                                  const void* input_, const void* grad_output_,
-				  void* grad_input_) {
-    const I* input = static_cast<const I*>(input_);
-    const GO* grad_output = static_cast<const GO*>(grad_output_);
-    GI* grad_input = static_cast<GI*>(grad_input_);
-    for (std::size_t i = 0; i < inputLenght; ++i) {
-        grad_input[i] = (input[i] > 0) ? grad_output[i] : 0;
-    }
-}
-
-namespace {
-static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Float32(
-    {DataType::Float32, DataType::Float32, DataType::Float32},
-    Aidge::ReLUImpl_cpu_backward_kernel<float, float, float>);
-static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Int32(
-    {DataType::Int32, DataType::Int32, DataType::Int32},
-    Aidge::ReLUImpl_cpu_backward_kernel<int, int, int>);
-static Registrar<ReLUImplBackward_cpu> registrarReLUImplBackward_cpu_Float64(
-    {DataType::Float64, DataType::Float64, DataType::Float64},
-    Aidge::ReLUImpl_cpu_backward_kernel<double, double, double>);
-}  // namespace
-}  // namespace Aidge
-
-#endif /* AIDGE_CPU_OPERATOR_RELUIMPL_BACKWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp
deleted file mode 100644
index af9c65590c7182185c9d79669dde49e592cbeb5d..0000000000000000000000000000000000000000
--- a/include/aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_CPU_OPERATOR_RELUIMPL_FORWARD_KERNEL_H_
-#define AIDGE_CPU_OPERATOR_RELUIMPL_FORWARD_KERNEL_H_
-
-#include "aidge/utils/Registrar.hpp"
-
-#include "aidge/backend/cpu/operator/ReLUImpl.hpp"
-
-namespace Aidge {
-template <class I, class O>
-void ReLUImpl_cpu_forward_kernel(std::size_t inputLenght,
-                                     const void* input_,
-                                     void* output_) {
-
-    const I* input = static_cast<const I*>(input_);
-    O* output = static_cast<O*>(output_);
-
-//#pragma omp parallel for if (inputLenght > 1024)
-    for (std::size_t i = 0; i < inputLenght; ++i) {
-        output[i] = (input[i] > 0) ? input[i] : 0;
-    }
-}
-
-namespace {
-static Registrar<ReLUImplForward_cpu> registrarReLUImplForward_cpu_Float32(
-        {DataType::Float32, DataType::Float32}, Aidge::ReLUImpl_cpu_forward_kernel<float, float>);
-static Registrar<ReLUImplForward_cpu> registrarReLUImplForward_cpu_Int32(
-        {DataType::Int32, DataType::Int32}, Aidge::ReLUImpl_cpu_forward_kernel<int, int>);
-static Registrar<ReLUImplForward_cpu> registrarReLUImplForward_cpu_Float64(
-        {DataType::Float64, DataType::Float64}, Aidge::ReLUImpl_cpu_forward_kernel<double, double>);
-}  // namespace
-}  // namespace Aidge
-
-#endif /* AIDGE_CPU_OPERATOR_RELUIMPL_FORWARD_KERNEL_H_ */
diff --git a/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
index 8d784c38dc006ea82f040dfe83b4bef05908dd68..075dee200a10fb0b53d88499287fa33a0a715316 100644
--- a/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReduceMeanImpl.hpp
@@ -29,19 +29,19 @@ namespace Aidge {
 class ReduceMeanImplForward_cpu
     : public Registrable<ReduceMeanImplForward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::vector<std::int32_t>&,
+                        std::function<void(const std::vector<std::int32_t>&,
                             DimSize_t,
                             const std::vector<DimSize_t>&,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 class ReduceMeanImpl1DBackward_cpu
     : public Registrable<ReduceMeanImpl1DBackward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::vector<std::int32_t>&,
+                        std::function<void(const std::vector<std::int32_t>&,
                             DimSize_t,
                             const std::vector<DimSize_t>&,
                             const void *,
-                            void *)> {};
+                            void *)>> {};
 
 class ReduceMeanImpl_cpu : public OperatorImpl {
    public:
@@ -60,30 +60,30 @@ class ReduceMeanImpl_cpu : public OperatorImpl {
 // class ReduceMeanImpl1DForward_cpu
 //     : public Registrable<ReduceMeanImpl1DForward_cpu,
 //                          std::tuple<DataType, DataType>,
-//                          void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+//                          std::function<void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)>> {};
 // class ReduceMeanImpl1DBackward_cpu
 //     : public Registrable<ReduceMeanImpl1DBackward_cpu,
 //                          std::tuple<DataType, DataType>,
-//                          void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
+//                          std::function<void(const ReduceMean_Op<1>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)>> {};
 
 // // DIM 2
 // class ReduceMeanImpl2DForward_cpu
 //     : public Registrable<ReduceMeanImpl2DForward_cpu,
 //                          std::tuple<DataType, DataType>,
-//                          void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+//                          std::function<void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)>> {};
 // class ReduceMeanImpl2DBackward_cpu
 //     : public Registrable<ReduceMeanImpl2DBackward_cpu,
 //                          std::tuple<DataType, DataType>,
-//                          void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)> {};
+//                          std::function<void(const ReduceMean_Op<2>::Attrs &, const std::vector<DimSize_t>&, const void *,  void *)>> {};
 // // DIM 3
 // class ReduceMeanImpl3DForward_cpu
 //     : public Registrable<ReduceMeanImpl3DForward_cpu,
 //                          std::tuple<DataType, DataType>,
-//                          void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+//                          std::function<void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)>> {};
 // class ReduceMeanImpl3DBackward_cpu
 //     : public Registrable<ReduceMeanImpl3DBackward_cpu,
 //                          std::tuple<DataType, DataType>,
-//                          void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)> {};
+//                          std::function<void(const ReduceMean_Op<3>::Attrs &, const std::vector<DimSize_t>&, const void *, void *)>> {};
 
 // class ReduceMeanImpl1D_cpu : public OperatorImpl {
 //    public:
diff --git a/include/aidge/backend/cpu/operator/ScalingImpl.hpp b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
index 8590169272818a225fe4299150f873733cdd9cd9..ca53f2f15c856a51cd304b19133048036e38244f 100644
--- a/include/aidge/backend/cpu/operator/ScalingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
@@ -28,21 +28,21 @@ namespace Aidge {
 class ScalingImplForward_cpu
     : public Registrable<ScalingImplForward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const float,
+                        std::function<void(const float,
                             const std::size_t,
                             const bool,
                             std::size_t,
                             const void*,
-                            void*)> {};
+                            void*)>> {};
 class ScalingImplBackward_cpu
     : public Registrable<ScalingImplBackward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const float,
+                        std::function<void(const float,
                             const std::size_t,
                             const bool,
                             std::size_t,
                             const void*,
-                            void*)> {};
+                            void*)>> {};
 
 class ScalingImpl_cpu : public OperatorImpl {
 public:
@@ -52,7 +52,7 @@ public:
         return std::make_unique<ScalingImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/SigmoidImpl.hpp b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
index 34340e6166a48b465c7723e85d91c195bfb42277..b8a9d3bf8e3e0cbde08a6c07b224f4708ecce34d 100644
--- a/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SigmoidImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class SigmoidImplForward_cpu
-    : public Registrable<SigmoidImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<SigmoidImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
 };
 class SigmoidImplBackward_cpu
-    : public Registrable<SigmoidImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<SigmoidImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::size_t, const void*, const void*, void*)>> {
 };
 
 class SigmoidImpl_cpu : public OperatorImpl {
@@ -39,7 +39,7 @@ public:
         return std::make_unique<SigmoidImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
 	
     void forward() override final;
 
diff --git a/include/aidge/backend/cpu/operator/SliceImpl.hpp b/include/aidge/backend/cpu/operator/SliceImpl.hpp
index 61aed1553bfbd2e67fc837ec6ea8d80b26ef3558..0f67f8c5541f739c127c50435c97862f26cb03a3 100644
--- a/include/aidge/backend/cpu/operator/SliceImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SliceImpl.hpp
@@ -29,23 +29,23 @@ namespace Aidge {
 class SliceImplForward_cpu
     : public Registrable<SliceImplForward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::vector<std::int64_t>&,
+                        std::function<void(const std::vector<std::int64_t>&,
                             const std::vector<std::int64_t>&,
                             const std::vector<std::int8_t>&,
                             const std::vector<std::int64_t>&,
                             const std::vector<DimSize_t>&,
                             const void*,
-                            void*)> {};
+                            void*)>> {};
 class SliceImplBackward_cpu
     : public Registrable<SliceImplBackward_cpu,
                         std::tuple<DataType, DataType>,
-                        void(const std::vector<std::int64_t>&,
+                        std::function<void(const std::vector<std::int64_t>&,
                             const std::vector<std::int64_t>&,
                             const std::vector<std::int8_t>&,
                             const std::vector<std::int64_t>&,
                             const std::vector<DimSize_t>&,
                             const void*,
-                            void*)> {};
+                            void*)>> {};
 
 class SliceImpl_cpu : public OperatorImpl {
 public:
@@ -55,7 +55,7 @@ public:
         return std::make_unique<SliceImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
index 2b2fab485656efdc37ee134cb4ae574b6b403405..a09261d0ec79869465c2bd6291f057dfa8387c90 100644
--- a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class SoftmaxImplForward_cpu
-    : public Registrable<SoftmaxImplForward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const std::vector<DimSize_t>&, const void*, void*)> {
+    : public Registrable<SoftmaxImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(std::size_t, const std::vector<DimSize_t>&, const void*, void*)>> {
 };
 class SoftmaxImplBackward_cpu
-    : public Registrable<SoftmaxImplBackward_cpu, std::tuple<DataType, DataType>, void(std::size_t, const std::vector<DimSize_t>&, const void*, void*)> {
+    : public Registrable<SoftmaxImplBackward_cpu, std::tuple<DataType, DataType>, std::function<void(std::size_t, const std::vector<DimSize_t>&, const void*, void*)>> {
 };
 
 class SoftmaxImpl_cpu : public OperatorImpl {
@@ -39,7 +39,7 @@ public:
         return std::make_unique<SoftmaxImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/SqrtImpl.hpp b/include/aidge/backend/cpu/operator/SqrtImpl.hpp
index 1691d951678509274736d558360c8110958820a9..5764fea4519b55389597db6ac0797239352b7dea 100644
--- a/include/aidge/backend/cpu/operator/SqrtImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SqrtImpl.hpp
@@ -26,10 +26,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class SqrtImplForward_cpu
-    : public Registrable<SqrtImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<SqrtImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
 };
 class SqrtImplBackward_cpu
-    : public Registrable<SqrtImplBackward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<SqrtImplBackward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
 };
 
 class SqrtImpl_cpu : public OperatorImpl {
@@ -40,7 +40,7 @@ public:
         return std::make_unique<SqrtImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
 
     void forward() override final;
 
diff --git a/include/aidge/backend/cpu/operator/SubImpl.hpp b/include/aidge/backend/cpu/operator/SubImpl.hpp
index 15c028ae6289f39e0b6e6fd74e51e138b1f2675c..6f9b9a6d5d3d18499e6a74a9139cee7253b5d95a 100644
--- a/include/aidge/backend/cpu/operator/SubImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SubImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class SubImplForward_cpu
-    : public Registrable<SubImplForward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)> {
+    : public Registrable<SubImplForward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*,void*)>> {
 };
 class SubImplBackward_cpu
-    : public Registrable<SubImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)> {
+    : public Registrable<SubImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::vector<std::size_t>&, const std::vector<std::size_t>&, const std::vector<std::size_t>&, const void*, const void*, void*)>> {
 };
 
 class SubImpl_cpu : public OperatorImpl {
@@ -39,7 +39,7 @@ public:
         return std::make_unique<SubImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
     void forward() override;
 };
 
diff --git a/include/aidge/backend/cpu/operator/TanhImpl.hpp b/include/aidge/backend/cpu/operator/TanhImpl.hpp
index 0bf851e77d94c160c0362301df33d682347daf0c..09864d3e50182df319762a2356c946c977b6253b 100644
--- a/include/aidge/backend/cpu/operator/TanhImpl.hpp
+++ b/include/aidge/backend/cpu/operator/TanhImpl.hpp
@@ -25,10 +25,10 @@ namespace Aidge {
 
 // compute kernel registry for forward and backward
 class TanhImplForward_cpu
-    : public Registrable<TanhImplForward_cpu, std::tuple<DataType, DataType>, void(const std::size_t, const void*, void*)> {
+    : public Registrable<TanhImplForward_cpu, std::tuple<DataType, DataType>, std::function<void(const std::size_t, const void*, void*)>> {
 };
 class TanhImplBackward_cpu
-    : public Registrable<TanhImplBackward_cpu, std::tuple<DataType, DataType, DataType>, void(const std::size_t, const void*, const void*, void*)> {
+    : public Registrable<TanhImplBackward_cpu, std::tuple<DataType, DataType, DataType>, std::function<void(const std::size_t, const void*, const void*, void*)>> {
 };
 
 class TanhImpl_cpu : public OperatorImpl {
@@ -39,7 +39,7 @@ public:
         return std::make_unique<TanhImpl_cpu>(op);
     }
 
-    Elts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    std::shared_ptr<ProdConso> getProdConso() const override { return std::make_unique<ProdConso>(mOp, true); };
 	
     void forward() override final;
 
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index d6d75a608e4da7d8b9ed8a28912ff2eb1751e042..7074546f48bfb0e003d7a12813f44f213bba7a54 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -22,11 +22,6 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
-Aidge::Elts_t  Aidge::AddImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void  Aidge::AddImpl_cpu::forward() {
     const auto& opTensor = static_cast<const OperatorTensor&>(mOp);
     AIDGE_ASSERT(opTensor.getInput(0)->hasImpl(), "cannot run Add forward because the 0-th input has no implementation.");
diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp
index feaa7e67a8d0bc726462aed99e557493d3b8d0c6..798ca01baf971379db5c37245f3d4f3fa1cbdd6b 100644
--- a/src/operator/AvgPoolingImpl.cpp
+++ b/src/operator/AvgPoolingImpl.cpp
@@ -21,11 +21,6 @@
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::AvgPoolingImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const AvgPooling_Op<2>&>(mOp);
     assert(op_.getInput(0) && "missing input #0");
diff --git a/src/operator/BatchNormImpl.cpp b/src/operator/BatchNormImpl.cpp
index 3046eea9bd241732daf39cce1783b5ee50de01c7..8291ddeda3c5f2a32398fd1955a4ddd1f0229c92 100644
--- a/src/operator/BatchNormImpl.cpp
+++ b/src/operator/BatchNormImpl.cpp
@@ -21,11 +21,6 @@
 
 #include "aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp"
 
-Aidge::Elts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::BatchNormImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const BatchNorm_Op<2>&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0 for BatchNorm Operator");
diff --git a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp
index 591e8a0637d1e52c75193ac1750a210a08815ccc..ba3d4d8effd10b2c71965e4a4135ef501052d777 100644
--- a/src/operator/ConvDepthWiseImpl.cpp
+++ b/src/operator/ConvDepthWiseImpl.cpp
@@ -21,12 +21,6 @@
 #include "aidge/utils/Log.hpp"
 #include "aidge/utils/Types.h"
 
-
-Aidge::Elts_t Aidge::ConvDepthWiseImpl1D_cpu::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::ConvDepthWiseImpl1D_cpu::forward() {
     const auto& op_ = dynamic_cast<const ConvDepthWise_Op<1>&>(mOp);
 
@@ -75,11 +69,6 @@ void Aidge::ConvDepthWiseImpl1D_cpu::forward() {
             );
 }
 
-Aidge::Elts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const ConvDepthWise_Op<2>&>(mOp);
 
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index 0be31befe2019d70b628db878443f14b1d622f1c..3a6b331bd5e40a19113d231e22bb68dacc9fd914 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -22,11 +22,6 @@
 #include "aidge/operator/Conv.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::ConvImpl1D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::ConvImpl1D_cpu::forward() {
     const auto& op_ = static_cast<const Conv_Op<1>&>(mOp);
 
@@ -75,11 +70,6 @@ AIDGE_ASSERT(op_.getInput(0), "missing input #0 in Conv Operator.");
             );
 }
 
-Aidge::Elts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::ConvImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const Conv_Op<2>&>(mOp);
 
diff --git a/src/operator/DivImpl.cpp b/src/operator/DivImpl.cpp
index cfd74be45b29852c89e4a27035ce2d38fc7266cc..3869b3a74b5f780fa987403a26002a1756225b90 100644
--- a/src/operator/DivImpl.cpp
+++ b/src/operator/DivImpl.cpp
@@ -19,11 +19,6 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::DivImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::DivImpl_cpu::forward() {
     // Find the correct kernel type
     // auto kernelFunc = Registrar<DivImplForward_cpu>::create({
diff --git a/src/operator/ErfImpl.cpp b/src/operator/ErfImpl.cpp
index ace098468c05b80c4116e6f85d00b5fabaf754cd..b32e19b6df573017736780bebdcae8842477c9e6 100644
--- a/src/operator/ErfImpl.cpp
+++ b/src/operator/ErfImpl.cpp
@@ -19,11 +19,6 @@
 #include "aidge/operator/Erf.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::ErfImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::ErfImpl_cpu::forward() {
     const Erf_Op& op = static_cast<const Erf_Op&>(mOp);
 
diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp
index 9d4f2a7edcdf263751ec1d9cea10cd4d60055610..e70887423fccfc012388bb080bcb23fdefe72747 100644
--- a/src/operator/LeakyReLUImpl.cpp
+++ b/src/operator/LeakyReLUImpl.cpp
@@ -22,12 +22,6 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/Registrar.hpp"
 
-
-Aidge::Elts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::LeakyReLUImpl_cpu::forward() {
     const LeakyReLU_Op& op_ = dynamic_cast<const LeakyReLU_Op&>(mOp);
 
diff --git a/src/operator/LnImpl.cpp b/src/operator/LnImpl.cpp
index 12885a944be46a977463e900af4047319bb1c8b2..ec1d5cf37f806b32c0c790326bef943e7de7e21a 100644
--- a/src/operator/LnImpl.cpp
+++ b/src/operator/LnImpl.cpp
@@ -23,11 +23,6 @@
 #include "aidge/backend/cpu/operator/LnImpl_forward_kernels.hpp"
 #include "aidge/backend/cpu/operator/LnImpl_backward_kernels.hpp"
 
-Aidge::Elts_t Aidge::LnImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::LnImpl_cpu::forward() {
     const Ln_Op& op_ = static_cast<const Ln_Op&>(mOp);
 	std::shared_ptr<Tensor> in0 = op_.getInput(0);
diff --git a/src/operator/MaxPoolingImpl.cpp b/src/operator/MaxPoolingImpl.cpp
index 2e6d67abbdd6776a1f75449a0f4562143cbaae87..ec21feb917bd015929f45b7217f74bd51abbae55 100644
--- a/src/operator/MaxPoolingImpl.cpp
+++ b/src/operator/MaxPoolingImpl.cpp
@@ -19,12 +19,6 @@
 #include "aidge/utils/Log.hpp"
 #include "aidge/utils/Types.h"
 
-
-Aidge::Elts_t Aidge::MaxPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::MaxPoolingImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const MaxPooling_Op<2>&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0 in MaxPooling Operator.");
diff --git a/src/operator/MulImpl.cpp b/src/operator/MulImpl.cpp
index d7feb9b76e25a0e874b3682cdc5b3e53bf8e9228..c6a820e2856dd00dbd465910605980787ba37797 100644
--- a/src/operator/MulImpl.cpp
+++ b/src/operator/MulImpl.cpp
@@ -23,11 +23,6 @@
 #include "aidge/backend/cpu/operator/MulImpl.hpp"
 #include "aidge/backend/cpu/operator/MulImpl_forward_kernels.hpp"
 
-Aidge::Elts_t Aidge::MulImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::MulImpl_cpu::forward() {
     // Find the correct kernel type
     auto kernelFunc = Registrar<MulImplForward_cpu>::create({
diff --git a/src/operator/PadImpl.cpp b/src/operator/PadImpl.cpp
index b4b52d6be855b6a1f8c0a71a6a9169ee9690f34c..7e5364e647d73dd23f3c89058b251944c7ab07a3 100644
--- a/src/operator/PadImpl.cpp
+++ b/src/operator/PadImpl.cpp
@@ -18,7 +18,7 @@
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
 #include "aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp"
 
-Aidge::Elts_t Aidge::PadImpl1D_cpu::getNbRequiredProtected(Aidge::IOIndex_t inputIdx) const {
+Aidge::Elts_t Aidge::Pad_ProdConso_cpu::getNbRequiredProtected(Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(inputIdx == 0, "input index out of range."
         "{} Operator has only one input", mOp.type());
     (void) inputIdx;
@@ -49,18 +49,6 @@ void Aidge::PadImpl1D_cpu::forward() {
                 getCPUPtr(mOp.getRawOutput(0)));
 }
 
-Aidge::Elts_t Aidge::PadImpl2D_cpu::getNbRequiredProtected(Aidge::IOIndex_t inputIdx) const {
-    AIDGE_ASSERT(inputIdx == 0, "input index out of range."
-        "{} Operator has only one input", mOp.type());
-    (void) inputIdx;
-
-    // Padding cannot be in-place!
-    // We must ensure that we do not override data that has not been consummed yet.
-    const auto inputSize = std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->size();
-    const auto outputSize = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->size();
-    return Elts_t::DataElts(outputSize - inputSize);
-}
-
 void Aidge::PadImpl2D_cpu::forward() {
     const auto& op_ = dynamic_cast<const Pad_Op<2>&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0 in Pad Operator.");
diff --git a/src/operator/PowImpl.cpp b/src/operator/PowImpl.cpp
index 811d13804cffdd2477fc830f1779b0fb6271eb0b..97f510b96c2cad74534ce3a17e6e47f80fa6fbe2 100644
--- a/src/operator/PowImpl.cpp
+++ b/src/operator/PowImpl.cpp
@@ -23,11 +23,6 @@
 #include "aidge/backend/cpu/operator/PowImpl.hpp"
 #include "aidge/backend/cpu/operator/PowImpl_forward_kernels.hpp"
 
-Aidge::Elts_t Aidge::PowImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::PowImpl_cpu::forward() {
     // Find the correct kernel type
     auto kernelFunc = Registrar<PowImplForward_cpu>::create({
diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp
index 4a0fb9f5d929e2ce731a21b5553e1b9257a32daa..5f4170472cc99ee179faaf41853a5088c743eb2a 100644
--- a/src/operator/ReLUImpl.cpp
+++ b/src/operator/ReLUImpl.cpp
@@ -19,14 +19,8 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
-#include "aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp"
-#include "aidge/backend/cpu/operator/ReLUImpl_backward_kernels.hpp"
-
-Aidge::Elts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
 
+template <>
 void Aidge::ReLUImpl_cpu::forward() {
 	const ReLU_Op& op_ = dynamic_cast<const ReLU_Op&>(mOp);
     std::shared_ptr<Tensor> in0 = op_.getInput(0);
@@ -34,16 +28,15 @@ void Aidge::ReLUImpl_cpu::forward() {
     AIDGE_ASSERT(in0, "missing input #0");
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<ReLUImplForward_cpu>::create({
-        in0->dataType(),
-	    out0->dataType()});
+    const auto impl = Registrar<ReLUImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    kernelFunc(in0->size(),
+    impl.forward(in0->size(),
         getCPUPtr(mOp.getRawInput(0)),
         getCPUPtr(mOp.getRawOutput(0)));
 }
 
+template <>
 void Aidge::ReLUImpl_cpu::backward() {
     const ReLU_Op& op_ = dynamic_cast<const ReLU_Op&>(mOp);
     std::shared_ptr<Tensor> in0  = op_.getInput(0);
@@ -53,12 +46,8 @@ void Aidge::ReLUImpl_cpu::backward() {
     AIDGE_ASSERT(out0, "missing output #0 for current {} operator", op_.type());
 
     // Find the correct kernel type
-    auto kernelFunc = Registrar<ReLUImplBackward_cpu>::create({
-	in0->dataType(),
-        gra_int0->dataType(),
-	gra_out0->dataType()
-    });
+    const auto impl = Registrar<ReLUImpl_cpu>::create(getBestMatch(getRequiredSpec()));
 
     // Call kernel
-    kernelFunc(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
+    impl.backward(gra_int0->size(), getCPUPtr(in0), getCPUPtr(gra_out0), getCPUPtr(gra_int0));
 }
diff --git a/src/operator/ScalingImpl.cpp b/src/operator/ScalingImpl.cpp
index db4670836e702f536243aadec36c5ba85b2344c8..30e0a30fa9d3fc14df458ad4364f050a78d8b0a4 100644
--- a/src/operator/ScalingImpl.cpp
+++ b/src/operator/ScalingImpl.cpp
@@ -21,11 +21,6 @@
 #include "aidge/utils/Types.h"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
-Aidge::Elts_t Aidge::ScalingImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::ScalingImpl_cpu::forward() {
     const auto& op_ = dynamic_cast<const Scaling_Op&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0 in Scaling Operator.");
diff --git a/src/operator/SigmoidImpl.cpp b/src/operator/SigmoidImpl.cpp
index ad69935c02e392d7aa1c9601acb827c5baf8970f..7e00f6f1944bb73c40324a9d5cb45a0f24a4626a 100644
--- a/src/operator/SigmoidImpl.cpp
+++ b/src/operator/SigmoidImpl.cpp
@@ -23,11 +23,6 @@
 #include "aidge/backend/cpu/operator/SigmoidImpl_forward_kernels.hpp"
 #include "aidge/backend/cpu/operator/SigmoidImpl_backward_kernels.hpp"
 
-Aidge::Elts_t Aidge::SigmoidImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::SigmoidImpl_cpu::forward() {
 	const Sigmoid_Op& op_ = dynamic_cast<const Sigmoid_Op&>(mOp);
     std::shared_ptr<Tensor> in0 = op_.getInput(0);
diff --git a/src/operator/SliceImpl.cpp b/src/operator/SliceImpl.cpp
index 8ffe4dcdd97b58758885b013d0c1770bd98a83ba..a7664262a63e0e3b0e3ffdc775b7c11702b00e6a 100644
--- a/src/operator/SliceImpl.cpp
+++ b/src/operator/SliceImpl.cpp
@@ -19,11 +19,6 @@
 #include "aidge/utils/Log.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Elts_t Aidge::SliceImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::SliceImpl_cpu::forward() {
     const auto& op_ = dynamic_cast<const Slice_Op&>(mOp);
     AIDGE_ASSERT(op_.getInput(0), "missing input #0 in Slice Operator.");
diff --git a/src/operator/SoftmaxImpl.cpp b/src/operator/SoftmaxImpl.cpp
index 5bc3699e2146e36a63b4a1602ca1cb86e3ff1e2f..07eaec6cbb9ab0b10705d51b0749ac9ae5b83daa 100644
--- a/src/operator/SoftmaxImpl.cpp
+++ b/src/operator/SoftmaxImpl.cpp
@@ -22,11 +22,6 @@
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 #include "aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp"
 
-Aidge::Elts_t Aidge::SoftmaxImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::SoftmaxImpl_cpu::forward() {
     const auto& op_ = dynamic_cast<const Softmax_Op&>(mOp);
     AIDGE_ASSERT(!op_.getInput(0)->empty(), "Softmax input empty");
diff --git a/src/operator/SqrtImpl.cpp b/src/operator/SqrtImpl.cpp
index edb8858fc4ac07fa5725d24688b22d64134afb0e..128135b2b5e415e3aaebcfd9975ec70950577ce9 100644
--- a/src/operator/SqrtImpl.cpp
+++ b/src/operator/SqrtImpl.cpp
@@ -22,11 +22,6 @@
 #include "aidge/backend/cpu/operator/SqrtImpl_forward_kernels.hpp"
 #include "aidge/backend/cpu/operator/SqrtImpl_backward_kernels.hpp"
 
-Aidge::Elts_t Aidge::SqrtImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::SqrtImpl_cpu::forward() {
     std::shared_ptr<Tensor> in0 = std::static_pointer_cast<Tensor>(mOp.getRawInput(0));
     std::shared_ptr<Tensor> out0 = std::static_pointer_cast<Tensor>(mOp.getRawOutput(0));
diff --git a/src/operator/SubImpl.cpp b/src/operator/SubImpl.cpp
index ffddb59ee3373c4a0a6c2653747744a43fd471d9..24f2c982a4f305d4a27b579bbe6b61a41a96de41 100644
--- a/src/operator/SubImpl.cpp
+++ b/src/operator/SubImpl.cpp
@@ -23,11 +23,6 @@
 #include "aidge/backend/cpu/operator/SubImpl.hpp"
 #include "aidge/backend/cpu/operator/SubImpl_forward_kernels.hpp"
 
-Aidge::Elts_t Aidge::SubImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::SubImpl_cpu::forward() {
 
     // Find the correct kernel type
diff --git a/src/operator/TanhImpl.cpp b/src/operator/TanhImpl.cpp
index a2469ed9b83679c0edf8d0a761abf9d3d046db6e..9fe054f103a2e1c5500dd86bb70735455f316cb2 100644
--- a/src/operator/TanhImpl.cpp
+++ b/src/operator/TanhImpl.cpp
@@ -23,11 +23,6 @@
 #include "aidge/backend/cpu/operator/TanhImpl_forward_kernels.hpp"
 #include "aidge/backend/cpu/operator/TanhImpl_backward_kernels.hpp"
 
-Aidge::Elts_t Aidge::TanhImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // this implementation can be in-place
-    return Elts_t::DataElts(0);
-}
-
 void Aidge::TanhImpl_cpu::forward() {
 	const Tanh_Op& op_ = dynamic_cast<const Tanh_Op&>(mOp);
     std::shared_ptr<Tensor> in0 = op_.getInput(0);