From 50d213976495e82bbf688fc6ca04406616934e2b Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Fri, 20 Oct 2023 16:25:16 +0200
Subject: [PATCH] Use default OperatorImpl methods in CPU operator implementations

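The per-operator scheduling bookkeeping (mNbConsumedData /
mNbProducedData) and the hand-written getNbRequiredData(),
getRequiredMemory(), getNbConsumedData(), getNbProducedData() and
updateConsummerProducer() overrides are removed from every CPU
operator implementation. Each *Impl_cpu class now passes its operator
to the OperatorImpl base constructor and inherits the default
behaviour; only getNbRequiredProtected() and forward() stay
operator-specific. Because the base class holds a generic operator
reference, forward() now reaches the static attributes through a
dynamic_cast to the concrete operator type. ProducerImpl.cpp is
deleted entirely: a Producer's forward() is a no-op.

All of the deleted overrides implemented the same whole-tensor policy,
which the OperatorImpl defaults are assumed to consolidate. The
following is a hypothetical sketch reconstructed from the removed
per-operator code, not the actual aidge_core source, which may differ:

    Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredData(const IOIndex_t inputIdx) const {
        assert(mOp.getInput(inputIdx) && "requires valid input");
        // Default policy: a forward pass requires each input tensor in full.
        const auto& dims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
        return std::accumulate(dims.begin(), dims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
    }

    void Aidge::OperatorImpl::updateConsummerProducer() {
        // Default policy: each input is consumed in full and the whole
        // output is produced on every forward pass.
        for (IOIndex_t inputIdx = 0; static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
            mNbConsumedData[inputIdx] += getNbRequiredData(inputIdx);
        mNbProducedData[0] += getRequiredMemory(0, {});
    }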
---
 .../aidge/backend/cpu/operator/AddImpl.hpp    | 117 +--------------
 .../backend/cpu/operator/AvgPoolingImpl.hpp   |  18 +--
 .../backend/cpu/operator/BatchNormImpl.hpp    |  18 +--
 .../cpu/operator/ConvDepthWiseImpl.hpp        |  18 +--
 .../aidge/backend/cpu/operator/ConvImpl.hpp   |  15 +-
 include/aidge/backend/cpu/operator/FCImpl.hpp |  22 +--
 .../backend/cpu/operator/LeakyReLUImpl.hpp    |  17 +--
 .../aidge/backend/cpu/operator/MatMulImpl.hpp |  27 +---
 .../backend/cpu/operator/MaxPoolingImpl.hpp   |  18 +--
 .../aidge/backend/cpu/operator/PadImpl.hpp    |  18 +--
 .../backend/cpu/operator/ProducerImpl.hpp     |  21 +--
 .../aidge/backend/cpu/operator/ReLUImpl.hpp   |  17 +--
 .../backend/cpu/operator/ScalingImpl.hpp      |  20 +--
 .../backend/cpu/operator/SoftmaxImpl.hpp      |  17 +--
 src/operator/AddImpl.cpp                      | 140 ++----------------
 src/operator/AvgPoolingImpl.cpp               |  44 +-----
 src/operator/BatchNormImpl.cpp                |  46 +-----
 src/operator/ConvDepthWiseImpl.cpp            |  44 +-----
 src/operator/ConvImpl.cpp                     |  45 +-----
 src/operator/FCImpl.cpp                       |  82 +---------
 src/operator/LeakyReLUImpl.cpp                |  38 +----
 src/operator/MatMulImpl.cpp                   |  74 +--------
 src/operator/MaxPoolingImpl.cpp               |  44 +-----
 src/operator/PadImpl.cpp                      |  53 +------
 src/operator/ProducerImpl.cpp                 |  75 ----------
 src/operator/ReLUImpl.cpp                     |  37 +----
 src/operator/ScalingImpl.cpp                  |  47 +-----
 src/operator/SoftmaxImpl.cpp                  |  44 +-----
 28 files changed, 80 insertions(+), 1096 deletions(-)
 delete mode 100644 src/operator/ProducerImpl.cpp

diff --git a/include/aidge/backend/cpu/operator/AddImpl.hpp b/include/aidge/backend/cpu/operator/AddImpl.hpp
index 99c83e2c..9dbd2150 100644
--- a/include/aidge/backend/cpu/operator/AddImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl.hpp
@@ -53,152 +53,51 @@ class AddImplBackward_cpu<3>
 
 template <DimIdx_t NUM>
 class AddImpl_cpu : public OperatorImpl {
-   private:
-    const Add_Op<NUM>& mOp;
-    std::array<NbElts_t, NUM> mNbConsumedData = {};
-    std::array<NbElts_t, 1> mNbProducedData = {};
-
-   public:
-    AddImpl_cpu(const Add_Op<NUM>& op) : mOp(op) {}
+public:
+    AddImpl_cpu(const Add_Op<NUM>& op) : OperatorImpl(op) {}
 
     static std::unique_ptr<AddImpl_cpu<NUM>> create(const Add_Op<NUM>& op) {
         return std::make_unique<AddImpl_cpu<NUM>>(op);
     }
-
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final {
-        assert(mOp.getInput(inputIdx) && "requires valid input");
-
-        // Requires the whole tensors
-        const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
-        return std::accumulate(inputDims.begin(), inputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
-    }
-
-    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final {
-        // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
-        return 0;
-    }
-
-    NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& /*inputsSize*/) const override final {
-        // Requires the whole tensors, regardless of available data on inputs
-        assert(outputIdx == 0 && "operator has only one output");
-        (void) outputIdx;
-
-        const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
-        return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
-    }
-
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final {
-        assert(inputIdx < mNbConsumedData.size());
-        return mNbConsumedData[inputIdx];
-    }
-
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final {
-        assert(outputIdx < mNbProducedData.size());
-        return mNbProducedData[outputIdx];
-    }
-    void updateConsummerProducer() override final;
-
-    void forward() override {
-        // nothing
-    }
-
-    void backward() override { printf("Not implemented yet.\n"); }
 };
 
 template <>
 class AddImpl_cpu<1> : public OperatorImpl {
-   private:
-    const Add_Op<1>& mOp;
-    std::array<NbElts_t, 1> mNbConsumedData;
-    std::array<NbElts_t, 1> mNbProducedData;
-
-   public:
-    AddImpl_cpu(const Add_Op<1>& op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
+public:
+    AddImpl_cpu(const Add_Op<1>& op) : OperatorImpl(op) {}
 
     static std::unique_ptr<AddImpl_cpu<1>> create(const Add_Op<1>& op) {
         return std::make_unique<AddImpl_cpu<1>>(op);
     }
 
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t /*inputIdx*/) const override final;
-
     NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
-
-    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/,
-                               const std::vector<DimSize_t> &/*inputsSize*/) const override final;
-
-    NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override final;
-
-    NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
-    void updateConsummerProducer() override final;
-
     void forward() override;
-
-    void backward() override;
 };
 
 template <>
 class AddImpl_cpu<2> : public OperatorImpl {
-   private:
-    const Add_Op<2>& mOp;
-    std::array<NbElts_t, 2> mNbConsumedData;
-    std::array<NbElts_t, 1> mNbProducedData;
-
-   public:
-    AddImpl_cpu(const Add_Op<2>& op) : mOp(op), mNbConsumedData({0, 0}), mNbProducedData({0}) {}
+public:
+    AddImpl_cpu(const Add_Op<2>& op) : OperatorImpl(op) {}
 
     static std::unique_ptr<AddImpl_cpu<2>> create(const Add_Op<2>& op) {
         return std::make_unique<AddImpl_cpu<2>>(op);
     }
 
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
-
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-
-    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/,
-                               const std::vector<DimSize_t>& /*inputsSize*/) const override final;
-
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
-
-    NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
-    void updateConsummerProducer() override final;
-
     void forward() override;
-
-    void backward() override;
 };
 
 template <>
 class AddImpl_cpu<3> : public OperatorImpl {
-   private:
-    const Add_Op<3>& mOp;
-    std::array<NbElts_t, 3> mNbConsumedData;
-    std::array<NbElts_t, 1> mNbProducedData;
-
-   public:
-    AddImpl_cpu(const Add_Op<3>& op) : mOp(op), mNbConsumedData({0, 0, 0}), mNbProducedData({0}) {}
+public:
+    AddImpl_cpu(const Add_Op<3>& op) : OperatorImpl(op) {}
 
     static std::unique_ptr<AddImpl_cpu<3>> create(const Add_Op<3>& op) {
         return std::make_unique<AddImpl_cpu<3>>(op);
     }
 
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
-
     NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
-
-    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
-
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
-
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-    void updateConsummerProducer() override final;
-
     void forward() override;
-
-    void backward() override;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
index 635c246c..e3c3a6a2 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
@@ -36,29 +36,15 @@ class AvgPoolingImpl2DBackward_cpu
                          void(const AvgPooling_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, void *)> {};
 
 class AvgPoolingImpl2D_cpu : public OperatorImpl {
-   private:
-    const AvgPooling_Op<2> &mOp;
-    std::array<NbElts_t, 1> mNbConsumedData;
-    std::array<NbElts_t, 1> mNbProducedData;
-
-   public:
-    AvgPoolingImpl2D_cpu(const AvgPooling_Op<2> &op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
+public:
+    AvgPoolingImpl2D_cpu(const AvgPooling_Op<2> &op) : OperatorImpl(op) {}
 
     static std::unique_ptr<AvgPoolingImpl2D_cpu> create(const AvgPooling_Op<2> &op) {
         return std::make_unique<AvgPoolingImpl2D_cpu>(op);
     }
 
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &inputsSize) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-    void updateConsummerProducer() override final;
-
     void forward() override;
-
-    void backward() override;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
index f4611354..060e19b1 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
@@ -51,29 +51,15 @@ class BatchNormImpl2DBackward_cpu
                               void *)> {};
 
 class BatchNormImpl2D_cpu : public OperatorImpl {
-   private:
-    const BatchNorm_Op<2> &mOp;
-    std::array<NbElts_t, 5> mNbConsumedData;
-    std::array<NbElts_t, 1> mNbProducedData;
-
-   public:
-    BatchNormImpl2D_cpu(const BatchNorm_Op<2> &op) : mOp(op), mNbConsumedData({0, 0, 0, 0, 0}), mNbProducedData({0}) {}
+public:
+    BatchNormImpl2D_cpu(const BatchNorm_Op<2> &op) : OperatorImpl(op) {}
 
     static std::unique_ptr<BatchNormImpl2D_cpu> create(const BatchNorm_Op<2> &op) {
         return std::make_unique<BatchNormImpl2D_cpu>(op);
     }
 
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &inputsSize) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-    void updateConsummerProducer() override final;
-
     void forward() override;
-
-    void backward() override;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
index a5a144f5..7b5dbfb0 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
@@ -38,29 +38,15 @@ class ConvDepthWiseImpl2DBackward_cpu
                               const void *, const void *, void *)> {};
 
 class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
-   private:
-    const ConvDepthWise_Op<2> &mOp;
-    std::array<NbElts_t, 3> mNbConsumedData;
-    std::array<NbElts_t, 1> mNbProducedData;
-
-   public:
-    ConvDepthWiseImpl2D_cpu(const ConvDepthWise_Op<2> &op) : mOp(op), mNbConsumedData({0, 0, 0}), mNbProducedData({0}) {}
+public:
+    ConvDepthWiseImpl2D_cpu(const ConvDepthWise_Op<2> &op) : OperatorImpl(op) {}
 
     static std::unique_ptr<ConvDepthWiseImpl2D_cpu> create(const ConvDepthWise_Op<2> &op) {
         return std::make_unique<ConvDepthWiseImpl2D_cpu>(op);
     }
 
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-    void updateConsummerProducer() override final;
-
     void forward() override;
-
-    void backward() override;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp
index fba1fd6a..3db91ab5 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp
@@ -38,29 +38,16 @@ class ConvImpl2DBackward_cpu
                               const void *, const void *, void *)> {};
 
 class ConvImpl2D_cpu : public OperatorImpl {
-   private:
-    const Conv_Op<2> &mOp;
-    std::array<NbElts_t, 3> mNbConsumedData;
-    std::array<NbElts_t, 1> mNbProducedData;
-
    public:
-    ConvImpl2D_cpu(const Conv_Op<2> &op) : mOp(op), mNbConsumedData({0, 0, 0}), mNbProducedData({0}) {}
+    ConvImpl2D_cpu(const Conv_Op<2>& op) : OperatorImpl(op) {}
 
     static std::unique_ptr<ConvImpl2D_cpu> create(const Conv_Op<2> &op) {
         return std::make_unique<ConvImpl2D_cpu>(op);
     }
 
    public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-    void updateConsummerProducer() override final;
-
     void forward() override;
-
-    void backward() override;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/FCImpl.hpp b/include/aidge/backend/cpu/operator/FCImpl.hpp
index 875456d1..5d793690 100644
--- a/include/aidge/backend/cpu/operator/FCImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl.hpp
@@ -34,26 +34,14 @@ class FCImplBackward_cpu : public Registrable<FCImplBackward_cpu,
                                                        const void *, const void *, const void *, void *)> {};
 
 class FCImpl_cpu : public OperatorImpl {
-   private:
-    const FC_Op &mOp;
-    std::array<NbElts_t, 3> mNbConsumedData;
-    std::array<NbElts_t, 1> mNbProducedData;
+public:
+    FCImpl_cpu(const FC_Op &op) : OperatorImpl(op) {}
 
-   public:
-    FCImpl_cpu(const FC_Op &op) : mOp(op), mNbConsumedData({0, 0, 0}), mNbProducedData({0}) {}
+    static std::unique_ptr<FCImpl_cpu> create(const FC_Op &op) {
+        return std::make_unique<FCImpl_cpu>(op);
+    }
 
-    static std::unique_ptr<FCImpl_cpu> create(const FC_Op &op) { return std::make_unique<FCImpl_cpu>(op); }
-
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-	void updateConsummerProducer() override final;
     void forward() override;
-
-    void backward() override;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
index dc895c27..371e2905 100644
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
@@ -31,28 +31,15 @@ class LeakyReLUImplBackward_cpu
 };
 
 class LeakyReLUImpl_cpu : public OperatorImpl {
-   private:
-    const LeakyReLU_Op& mOp;
-    std::array<NbElts_t, 1> mNbConsumedData;
-    std::array<NbElts_t, 1> mNbProducedData;
-
-   public:
-    LeakyReLUImpl_cpu(const LeakyReLU_Op& op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
+public:
+    LeakyReLUImpl_cpu(const LeakyReLU_Op& op) : OperatorImpl(op) {}
 
     static std::unique_ptr<LeakyReLUImpl_cpu> create(const LeakyReLU_Op& op) {
         return std::make_unique<LeakyReLUImpl_cpu>(op);
     }
 
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-    void updateConsummerProducer() override final;
     void forward() override;
-
-    void backward() override;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/MatMulImpl.hpp b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
index 504406c7..2e4b3157 100644
--- a/include/aidge/backend/cpu/operator/MatMulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
@@ -35,37 +35,14 @@ class MatMulImplBackward_cpu
                               const void *, const void *, void *)> {};
 
 class MatMulImpl_cpu : public OperatorImpl {
-private:
-    const MatMul_Op &mOp;
-    std::array<NbElts_t, 2> mNbConsumedData;
-    std::array<NbElts_t, 1> mNbProducedData;
-
 public:
-    MatMulImpl_cpu(const MatMul_Op &op)
-        : mOp(op),
-          mNbConsumedData({0, 0}),
-          mNbProducedData({0})
-        {
-            // ctor
-        }
+    MatMulImpl_cpu(const MatMul_Op &op): OperatorImpl(op) {}
 
-    static std::unique_ptr<MatMulImpl_cpu> create(const MatMul_Op &op)
-    {
+    static std::unique_ptr<MatMulImpl_cpu> create(const MatMul_Op &op) {
         return std::make_unique<MatMulImpl_cpu>(op);
     }
 
-public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/,
-                               const std::vector<DimSize_t> & /*inputsSize*/) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-
-    void updateConsummerProducer() override final;
-
     void forward() override;
-    void backward() override;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
index ca448065..a96fcc22 100644
--- a/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl.hpp
@@ -36,29 +36,15 @@ class MaxPoolingImpl2DBackward_cpu
                          void(const MaxPooling_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, void *)> {};
 
 class MaxPoolingImpl2D_cpu : public OperatorImpl {
-   private:
-    const MaxPooling_Op<2> &mOp;
-    std::array<NbElts_t, 1> mNbConsumedData;
-    std::array<NbElts_t, 1> mNbProducedData;
-
-   public:
-    MaxPoolingImpl2D_cpu(const MaxPooling_Op<2> &op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
+public:
+    MaxPoolingImpl2D_cpu(const MaxPooling_Op<2> &op) : OperatorImpl(op) {}
 
     static std::unique_ptr<MaxPoolingImpl2D_cpu> create(const MaxPooling_Op<2> &op) {
         return std::make_unique<MaxPoolingImpl2D_cpu>(op);
     }
 
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &inputsSize) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-    void updateConsummerProducer() override final;
-
     void forward() override;
-
-    void backward() override;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/PadImpl.hpp b/include/aidge/backend/cpu/operator/PadImpl.hpp
index 1a3202a7..9d93828f 100644
--- a/include/aidge/backend/cpu/operator/PadImpl.hpp
+++ b/include/aidge/backend/cpu/operator/PadImpl.hpp
@@ -38,29 +38,15 @@ class PadImpl2DBackward_cpu
                               void *)> {};
 
 class PadImpl2D_cpu : public OperatorImpl {
-   private:
-    const Pad_Op<2> &mOp;
-    std::array<NbElts_t, 1> mNbConsumedData = {0};
-    std::array<NbElts_t, 1> mNbProducedData = {0};
-
-   public:
-    PadImpl2D_cpu(const Pad_Op<2> &op) : mOp(op) {}
+public:
+    PadImpl2D_cpu(const Pad_Op<2> &op) : OperatorImpl(op) {}
 
     static std::unique_ptr<PadImpl2D_cpu> create(const Pad_Op<2> &op) {
         return std::make_unique<PadImpl2D_cpu>(op);
     }
 
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-    void updateConsummerProducer() override final;
-
     void forward() override;
-
-    void backward() override;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/ProducerImpl.hpp b/include/aidge/backend/cpu/operator/ProducerImpl.hpp
index f23dfc26..431f11f6 100644
--- a/include/aidge/backend/cpu/operator/ProducerImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ProducerImpl.hpp
@@ -21,31 +21,18 @@
 
 namespace Aidge {
 class ProducerImpl_cpu : public OperatorImpl {
-   private:
-    const Producer_Op &mOp;
-
-   public:
-    ProducerImpl_cpu(const Producer_Op &op) : mOp(op) {}
+public:
+    ProducerImpl_cpu(const Producer_Op &op) : OperatorImpl(op) {}
 
     static std::unique_ptr<ProducerImpl_cpu> create(const Producer_Op &op) {
         return std::make_unique<ProducerImpl_cpu>(op);
     }
 
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-    void updateConsummerProducer() override final;
-
-    void forward() override;
-
-    void backward() override;
+    void forward() override {}
 };
 
 namespace {
-static Registrar<Producer_Op> registrarProducer1DImpl_cpu("cpu", Aidge::ProducerImpl_cpu::create);
+static Registrar<Producer_Op> registrarProducerImpl_cpu("cpu", Aidge::ProducerImpl_cpu::create);
 }  // namespace
 }  // namespace Aidge
 
diff --git a/include/aidge/backend/cpu/operator/ReLUImpl.hpp b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
index dd785d1d..6596c1c9 100644
--- a/include/aidge/backend/cpu/operator/ReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ReLUImpl.hpp
@@ -31,28 +31,15 @@ class ReLUImplBackward_cpu
 };
 
 class ReLUImpl_cpu : public OperatorImpl {
-   protected:
-    const ReLU_Op& mOp;
-    std::array<NbElts_t, 1> mNbConsumedData;
-    std::array<NbElts_t, 1> mNbProducedData;
-
-   public:
-    ReLUImpl_cpu(const ReLU_Op& op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
+public:
+    ReLUImpl_cpu(const ReLU_Op& op) : OperatorImpl(op) {}
 
     static std::unique_ptr<ReLUImpl_cpu> create(const ReLU_Op& op) {
         return std::make_unique<ReLUImpl_cpu>(op);
     }
 
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-    void updateConsummerProducer() override final;
     void forward() override;
-
-    void backward() override;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/ScalingImpl.hpp b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
index 58ca5851..e336adb0 100644
--- a/include/aidge/backend/cpu/operator/ScalingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ScalingImpl.hpp
@@ -32,31 +32,15 @@ class ScalingImplBackward_cpu
 };
 
 class ScalingImpl_cpu : public OperatorImpl {
-   private:
-    const Scaling_Op& mOp;
-    std::array<NbElts_t, 1> mNbConsumedData;
-    std::array<NbElts_t, 1> mNbProducedData;
-
-   public:
-    ScalingImpl_cpu(const Scaling_Op& op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
+public:
+    ScalingImpl_cpu(const Scaling_Op& op) : OperatorImpl(op) {}
 
     static std::unique_ptr<ScalingImpl_cpu> create(const Scaling_Op& op) {
-        //std::cout << "ScalingImpl_cpu create" << std::endl;
         return std::make_unique<ScalingImpl_cpu>(op);
     }
 
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-
-    void updateConsummerProducer() override final;
-
     void forward() override;
-
-    void backward() override;
 };
 
 namespace {
diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
index e2b30a59..995f57f7 100644
--- a/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
+++ b/include/aidge/backend/cpu/operator/SoftmaxImpl.hpp
@@ -31,28 +31,15 @@ class SoftmaxImplBackward_cpu
 };
 
 class SoftmaxImpl_cpu : public OperatorImpl {
-   private:
-    const Softmax_Op& mOp;
-    std::array<NbElts_t, 1> mNbConsumedData;
-    std::array<NbElts_t, 1> mNbProducedData;
-
-   public:
-    SoftmaxImpl_cpu(const Softmax_Op& op) : mOp(op), mNbConsumedData({0}), mNbProducedData({0}) {}
+public:
+    SoftmaxImpl_cpu(const Softmax_Op& op) : OperatorImpl(op) {}
 
     static std::unique_ptr<SoftmaxImpl_cpu> create(const Softmax_Op& op) {
         return std::make_unique<SoftmaxImpl_cpu>(op);
     }
 
-   public:
-    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
-    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
-    void updateConsummerProducer() override final;
     void forward() override;
-
-    void backward() override;
 };
 
 namespace {
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index be792333..4be00781 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -25,38 +25,12 @@
 // AddImpl_cpu<1>
 //////////////////////////////////
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbRequiredData(Aidge::IOIndex_t /*inputIdx*/) const {
-    assert(mOp.getInput(0) && "requires valid input");
-    // Requires the whole tensors
-    return static_cast<int>(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size());
-}
-
 Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
+    // This implementation can run fully in-place, so no input data needs to be protected.
     return 0;
 }
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getRequiredMemory(const Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
-    // Requires the whole tensors, regardless of available data on inputs
-    return std::static_pointer_cast<Tensor>(mOp.getOutput(0))->size();
-}
-
-Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbConsumedData(Aidge::IOIndex_t /*inputIdx*/) const {
-    return mNbConsumedData[0];
-}
-
-Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
-    return mNbProducedData[0];
-}
-void Aidge::AddImpl_cpu<1>::updateConsummerProducer(){
-    // Update producer-consumer data
-    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
-}
 void Aidge::AddImpl_cpu<1>::forward() {
-    // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
 
     // Find the correct kernel type
@@ -68,11 +42,6 @@ void Aidge::AddImpl_cpu<1>::forward() {
     kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
         mOp.getInput(0)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
-
-}
-
-void Aidge::AddImpl_cpu<1>::backward() {
-    printf("Not implemented yet.\n");
 }
 
 
@@ -80,68 +49,26 @@ void Aidge::AddImpl_cpu<1>::backward() {
 // AddImpl_cpu<2>
 //////////////////////////////////
 
-
-Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    assert(mOp.getInput(inputIdx) && "requires valid input");
-
-    // Requires the whole tensors
-    const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
-
-    return std::accumulate(inputDims.begin(), inputDims.end(),
-                            NbElts_t(1), std::multiplies<NbElts_t>());
-}
-
 Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
+    // This implementation of Add can run fully in-place, so no input data needs to be protected.
     return 0;
 }
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
-    // Requires the whole tensors, regardless of available data on inputs
-    assert(outputIdx == 0 && "operator has only one output");
-    (void) outputIdx; // avoid unused warning
-
-    const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
-    return std::accumulate(outputDims.begin(), outputDims.end(),
-                        NbElts_t(1), std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
-    assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
-    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
-}
-
-Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
-    return mNbProducedData[0];
-}
-void Aidge::AddImpl_cpu<2>::updateConsummerProducer(){
-    // Update producer-consumer data
-    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
-}
 void Aidge::AddImpl_cpu<2>::forward() {
-    // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.mInputs[1] && "missing input #1");
+    assert(mOp.getInput(1) && "missing input #1");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<AddImplForward_cpu<2>>::create({
         mOp.getInput(0)->dataType(),
-        mOp.mInputs[1]->dataType(),
+        mOp.getInput(1)->dataType(),
         mOp.getOutput(0)->dataType()});
 
     // Call kernel
     kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
         mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.mInputs[1]->getImpl()->rawPtr(),
+        mOp.getInput(1)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
-
-}
-
-void Aidge::AddImpl_cpu<2>::backward() {
-    printf("Not implemented yet.\n");
 }
 
 
@@ -149,70 +76,27 @@ void Aidge::AddImpl_cpu<2>::backward() {
 // AddImpl_cpu<3>
 //////////////////////////////////
 
-
-Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    assert(mOp.getInput(inputIdx) && "requires valid input");
-
-    // Requires the whole tensors
-    const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
-
-    return std::accumulate(inputDims.begin(), inputDims.end(),
-                            Aidge::NbElts_t(1), std::multiplies<Aidge::NbElts_t>());
-}
-
 Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
+    // This implementation of Add can run fully in-place, so no input data needs to be protected.
     return 0;
 }
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
-    // Requires the whole tensors, regardless of available data on inputs
-    assert(outputIdx == 0 && "operator has only one output");
-    (void) outputIdx; // avoid unused warning
-
-    const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
-    return std::accumulate(outputDims.begin(), outputDims.end(),
-                        NbElts_t(1), std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
-    assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
-    return mNbConsumedData[inputIdx];
-}
-
-Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
-    assert(static_cast<std::size_t>(outputIdx) < mNbProducedData.size());
-    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
-}
-void Aidge::AddImpl_cpu<3>::updateConsummerProducer(){
-    // Update producer-consumer data
-    for (IOIndex_t inputIdx = 0; static_cast<NbElts_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx]+= getNbRequiredData(inputIdx); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
-}
 void Aidge::AddImpl_cpu<3>::forward() {
-    // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.mInputs[1] && "missing input #1");
-    assert(mOp.mInputs[2] && "missing input #2");
+    assert(mOp.getInput(1) && "missing input #1");
+    assert(mOp.getInput(2) && "missing input #2");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<AddImplForward_cpu<3>>::create({
         mOp.getInput(0)->dataType(),
-        mOp.mInputs[1]->dataType(),
-        mOp.mInputs[2]->dataType(),
+        mOp.getInput(1)->dataType(),
+        mOp.getInput(2)->dataType(),
         mOp.getOutput(0)->dataType()});
 
     // Call kernel
     kernelFunc(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
         mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.mInputs[1]->getImpl()->rawPtr(),
-        mOp.mInputs[2]->getImpl()->rawPtr(),
+        mOp.getInput(1)->getImpl()->rawPtr(),
+        mOp.getInput(2)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
-
-}
-
-void Aidge::AddImpl_cpu<3>::backward() {
-    printf("Not implemented yet.\n");
 }
diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp
index b1f82bbb..ae93934c 100644
--- a/src/operator/AvgPoolingImpl.cpp
+++ b/src/operator/AvgPoolingImpl.cpp
@@ -20,49 +20,12 @@
 #include "aidge/backend/cpu/operator/AvgPoolingImpl.hpp"
 #include "aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp"
 
-Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    assert(mOp.getInput(inputIdx) && "requires valid input");
-
-    // Requires the whole tensors
-    const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
-
-    return std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
-}
-
 Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // for the direct convolution algorithm, convolutions can be in-place, if
-    // there is no padding!
+    // This implementation can run fully in-place, so no input data needs to be protected.
     return 0;
 }
 
-Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                                                           const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
-    // Requires the whole tensors, regardless of available data on inputs
-    assert(outputIdx == 0 && "operator has only one output");
-    (void) outputIdx;
-
-    const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
-    return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
-    assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
-    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
-}
-
-Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
-    assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
-    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
-}
-void Aidge::AvgPoolingImpl2D_cpu::updateConsummerProducer(){
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
-                                                                                           // amount for a forward pass
-    mNbProducedData[0] += getRequiredMemory(0, {});
-}
 void Aidge::AvgPoolingImpl2D_cpu::forward() {
-    // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
 
     // Find the correct kernel type
@@ -70,11 +33,8 @@ void Aidge::AvgPoolingImpl2D_cpu::forward() {
             Registrar<AvgPoolingImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()});
 
     // Call kernel
-    kernelFunc(mOp.getStaticAttributes(),
+    kernelFunc(dynamic_cast<const AvgPooling_Op<2>&>(mOp).getStaticAttributes(),
                mOp.getInput(0)->dims<4>(),
                mOp.getInput(0)->getImpl()->rawPtr(),
                mOp.getOutput(0)->getImpl()->rawPtr());
-
 }
-
-void Aidge::AvgPoolingImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
diff --git a/src/operator/BatchNormImpl.cpp b/src/operator/BatchNormImpl.cpp
index 90ee2b7a..c9d52b76 100644
--- a/src/operator/BatchNormImpl.cpp
+++ b/src/operator/BatchNormImpl.cpp
@@ -19,50 +19,12 @@
 #include "aidge/backend/cpu/operator/BatchNormImpl.hpp"
 #include "aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp"
 
-Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    assert(mOp.getInput(inputIdx) && "requires valid input");
-
-    // Requires the whole tensors
-    const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
-
-    return std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
-}
-
 Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // for the direct convolution algorithm, convolutions can be in-place, if
-    // there is no padding!
+    // This implementation can run fully in-place, so no input data needs to be protected.
     return 0;
 }
 
-Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                                                              const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    // Requires the whole tensors, regardless of available data on inputs
-    assert(outputIdx == 0 && "operator has only one output");
-    (void) outputIdx;
-
-    const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
-    return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
-    assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
-    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
-}
-
-Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
-    assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
-    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
-}
-void Aidge::BatchNormImpl2D_cpu::updateConsummerProducer(){
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
-                                                                   // amount for a forward pass
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
-}
 void Aidge::BatchNormImpl2D_cpu::forward() {
-    // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
     assert(mOp.getInput(1) && "missing input #1");
     assert(mOp.getInput(2) && "missing input #2");
@@ -76,7 +38,7 @@ void Aidge::BatchNormImpl2D_cpu::forward() {
                                                           mOp.getOutput(0)->dataType()});
 
     // Call kernel
-    kernelFunc(mOp.getStaticAttributes(),
+    kernelFunc(dynamic_cast<const BatchNorm_Op<2>&>(mOp).getStaticAttributes(),
                mOp.getInput(0)->dims<4>(),
                mOp.getInput(0)->getImpl()->rawPtr(),
                mOp.getInput(1)->getImpl()->rawPtr(),
@@ -85,8 +47,4 @@ void Aidge::BatchNormImpl2D_cpu::forward() {
                mOp.getInput(4)->getImpl()->rawPtr(),
                mOp.getOutput(0)->getImpl()->rawPtr(),
                true);
-
-
 }
-
-void Aidge::BatchNormImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
diff --git a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp
index 7801f64e..5ac109e2 100644
--- a/src/operator/ConvDepthWiseImpl.cpp
+++ b/src/operator/ConvDepthWiseImpl.cpp
@@ -21,50 +21,12 @@
 #include "aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp"
 #include "aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp"
 
-Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    assert(mOp.getInput(inputIdx) && "requires valid input");
-
-    // Requires the whole tensors
-    const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
-
-    return std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
-}
-
 Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // for the direct convolution algorithm, convolutions can be in-place, if
-    // there is no padding!
+    // This implementation can run fully in-place, so no input data needs to be protected.
     return 0;
 }
 
-Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                                                           const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    // Requires the whole tensors, regardless of available data on inputs
-    assert(outputIdx == 0 && "operator has only one output");
-    (void) outputIdx;
-
-    const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
-    return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
-    assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
-    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
-}
-
-Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
-    assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
-    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
-}
-void Aidge::ConvDepthWiseImpl2D_cpu::updateConsummerProducer(){
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
-                                                                   // amount for a forward pass
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
-}
 void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
-    // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
     assert(mOp.getInput(1) && "missing input #1");
     assert(mOp.getInput(2) && "missing input #2");
@@ -77,9 +39,7 @@ void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
                                                           mOp.getInput(2)->dataType(), mOp.getOutput(0)->dataType()});
 
     // Call kernel
-    kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
+    kernelFunc(dynamic_cast<const ConvDepthWise_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
                mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
                mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
 }
-
-void Aidge::ConvDepthWiseImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index edab4432..347d4279 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -21,48 +21,11 @@
 #include "aidge/backend/cpu/operator/ConvImpl.hpp"
 #include "aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp"
 
-Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    assert(mOp.getInput(inputIdx) && "requires valid input");
-
-    // Requires the whole tensors
-    const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
-
-    return std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
-}
-
 Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // for the direct convolution algorithm, convolutions can be in-place, if
-    // there is no padding!
+    // This implementation can run fully in-place, so no input data needs to be protected.
     return 0;
 }
 
-Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    // Requires the whole tensors, regardless of available data on inputs
-    assert(outputIdx == 0 && "operator has only one output");
-    (void) outputIdx;
-
-    const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
-    return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
-    assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
-    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
-}
-
-Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
-    assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
-    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
-}
-void Aidge::ConvImpl2D_cpu::updateConsummerProducer(){
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
-                                                                   // amount for a forward pass
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
-}
 void Aidge::ConvImpl2D_cpu::forward() {
     // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
@@ -75,11 +38,7 @@ void Aidge::ConvImpl2D_cpu::forward() {
                                                           mOp.getInput(2)->dataType(), mOp.getOutput(0)->dataType()});
 
     // Call kernel
-    kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
+    kernelFunc(dynamic_cast<const Conv_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
                mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
                mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
-
-
 }
-
-void Aidge::ConvImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index 3cf1ccf6..77ce5028 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -21,78 +21,17 @@
 #include "aidge/backend/cpu/operator/FCImpl.hpp"
 #include "aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp"
 
-Aidge::NbElts_t Aidge::FCImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const
-{
-    assert(mOp.getInput(inputIdx) && "requires valid input");
-
-    // Requires the whole tensors
-    const auto &inputDims
-        = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
-
-    return std::accumulate(
-        inputDims.begin(),
-        inputDims.end(),
-        Aidge::NbElts_t(1),
-        std::multiplies<Aidge::NbElts_t>());
-}
-
-Aidge::NbElts_t
-    Aidge::FCImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const
-{
-    // for the direct convolution algorithm, convolutions can be in-place, if
-    // there is no padding!
-    return 0;
-}
-
-Aidge::NbElts_t Aidge::FCImpl_cpu::getRequiredMemory(
-    const IOIndex_t outputIdx, const std::vector<DimSize_t> &/*inputsSize*/) const
-{
-    // Requires the whole tensors, regardless of available data on inputs
-    assert(outputIdx == 0 && "operator has only one output");
-    (void) outputIdx;
-
-    const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
-    return std::accumulate(
-        outputDims.begin(),
-        outputDims.end(),
-        static_cast<NbElts_t>(1),
-        std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::FCImpl_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const
-{
-    assert((inputIdx != gk_IODefaultIndex) && (inputIdx < mNbConsumedData.size()));
-    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
-}
-
-Aidge::NbElts_t Aidge::FCImpl_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const
-{
-    assert(static_cast<std::size_t>(outputIdx) < mNbProducedData.size());
-    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
-}
-
-void Aidge::FCImpl_cpu::updateConsummerProducer(){
-    // Update producer-consumer data
-    for (IOIndex_t inputIdx = 0; static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx]
-            += getNbRequiredData(static_cast<std::size_t>(inputIdx)); // each input is consumed by the minimum
-                                              // amount for a forward pass
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
-}
-
 void Aidge::FCImpl_cpu::forward()
 {
-    // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.mInputs[1] && "missing input #1");
-    assert(mOp.mInputs[2] && "missing input #2");
+    assert(mOp.getInput(1) && "missing input #1");
+    assert(mOp.getInput(2) && "missing input #2");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<FCImplForward_cpu>::create(
         {mOp.getInput(0)->dataType(),
-         mOp.mInputs[1]->dataType(),
-         mOp.mInputs[2]->dataType(),
+         mOp.getInput(1)->dataType(),
+         mOp.getInput(2)->dataType(),
          mOp.getOutput(0)->dataType()});
 
     // Call kernel
@@ -107,18 +46,11 @@ void Aidge::FCImpl_cpu::forward()
     // }
     // else
     kernelFunc(
-        mOp.getStaticAttributes(),
+        dynamic_cast<const FC_Op&>(mOp).getStaticAttributes(),
         mOp.getInput(0)->dims()[0],
         mOp.getInput(0)->sizeM1(),
         mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.mInputs[1]->getImpl()->rawPtr(),
-        mOp.mInputs[2]->getImpl()->rawPtr(),
+        mOp.getInput(1)->getImpl()->rawPtr(),
+        mOp.getInput(2)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
-
-
-}
-
-void Aidge::FCImpl_cpu::backward()
-{
-    printf("Not implemented yet.\n");
 }
diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp
index 316d3641..c81acf60 100644
--- a/src/operator/LeakyReLUImpl.cpp
+++ b/src/operator/LeakyReLUImpl.cpp
@@ -21,42 +21,12 @@
 #include "aidge/backend/cpu/operator/LeakyReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp"
 
-// FIXME: replace whole Tensor with minimum needed data quantity
-Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*inputIdx*/) const {
-    assert(mOp.getInput(0) && "requires valid input");
-
-    // Requires the whole tensors
-    const auto& inputDims = mOp.getInput(0)->dims();
-
-    return std::accumulate(inputDims.begin(), inputDims.end(),
-                        static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
-}
-
 Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
+    // This implementation can run fully in-place, so no input data needs to be protected.
     return 0;
 }
 
-Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    const auto& outputDims = mOp.getOutput(0)->dims();
-    return std::accumulate(outputDims.begin(), outputDims.end(),
-                        static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*inputIdx*/) const {
-    return mNbConsumedData[0];
-}
-
-Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
-    return mNbProducedData[0];
-}
-void Aidge::LeakyReLUImpl_cpu::updateConsummerProducer(){
-    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
-}
 void Aidge::LeakyReLUImpl_cpu::forward() {
-    // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
 
     // Find the correct kernel type
@@ -65,12 +35,8 @@ void Aidge::LeakyReLUImpl_cpu::forward() {
         mOp.getOutput(0)->dataType()});
 
     // Call kernel
-    kernelFunc(mOp.getStaticAttributes(),
+    kernelFunc(dynamic_cast<const LeakyReLU_Op&>(mOp).getStaticAttributes(),
         std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
         mOp.getInput(0)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
 }
-
-void Aidge::LeakyReLUImpl_cpu::backward() {
-    printf("Not implemented yet.\n");
-}
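
[Editor's note: every getNbRequiredData/getRequiredMemory override deleted in these hunks computed the same thing, namely the element count of the whole tensor. Presumably the new OperatorImpl defaults do exactly that, which is why LeakyReLU keeps only its genuinely specific override (getNbRequiredProtected). A sketch of such a default, reconstructed from the deleted bodies; the base-class placement is an assumption, not taken from the patch:

    // needs <cassert>, <numeric> (std::accumulate), <functional> (std::multiplies)
    Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredData(const IOIndex_t inputIdx) const {
        assert(mOp.getInput(inputIdx) && "requires valid input");
        // Conservative default: require the whole input tensor.
        const auto& dims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
        return std::accumulate(dims.begin(), dims.end(),
                               NbElts_t(1), std::multiplies<NbElts_t>());
    }
]
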
diff --git a/src/operator/MatMulImpl.cpp b/src/operator/MatMulImpl.cpp
index 972e1f0f..f4812629 100644
--- a/src/operator/MatMulImpl.cpp
+++ b/src/operator/MatMulImpl.cpp
@@ -21,76 +21,15 @@
 #include "aidge/backend/cpu/operator/MatMulImpl.hpp"
 #include "aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp"
 
-Aidge::NbElts_t Aidge::MatMulImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const
-{
-    assert(mOp.getInput(inputIdx) && "requires valid input");
-
-    // Requires the whole tensors
-    const auto &inputDims
-        = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
-
-    return std::accumulate(
-        inputDims.begin(),
-        inputDims.end(),
-        Aidge::NbElts_t(1),
-        std::multiplies<Aidge::NbElts_t>());
-}
-
-Aidge::NbElts_t
-    Aidge::MatMulImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const
-{
-    // for the direct convolution algorithm, convolutions can be in-place, if
-    // there is no padding!
-    return 0;
-}
-
-Aidge::NbElts_t Aidge::MatMulImpl_cpu::getRequiredMemory(
-    const IOIndex_t outputIdx, const std::vector<DimSize_t> &/*inputsSize*/) const
-{
-    // Requires the whole tensors, regardless of available data on inputs
-    assert(outputIdx == 0 && "operator has only one output");
-    (void) outputIdx;
-
-    const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
-    return std::accumulate(
-        outputDims.begin(),
-        outputDims.end(),
-        static_cast<NbElts_t>(1),
-        std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::MatMulImpl_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const
-{
-    assert((inputIdx != gk_IODefaultIndex) && (inputIdx < mNbConsumedData.size()));
-    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
-}
-
-Aidge::NbElts_t Aidge::MatMulImpl_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const
-{
-    assert(static_cast<std::size_t>(outputIdx) < mNbProducedData.size());
-    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
-}
-
-void Aidge::MatMulImpl_cpu::updateConsummerProducer(){
-    // Update producer-consumer data
-    for (IOIndex_t inputIdx = 0; static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx]
-            += getNbRequiredData(static_cast<std::size_t>(inputIdx)); // each input is consumed by the minimum
-                                              // amount for a forward pass
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
-}
-
 void Aidge::MatMulImpl_cpu::forward()
 {
-    // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
-    assert(mOp.mInputs[1] && "missing input #1");
+    assert(mOp.getInput(1) && "missing input #1");
 
     // Find the correct kernel type
     auto kernelFunc = Registrar<MatMulImplForward_cpu>::create(
         {mOp.getInput(0)->dataType(),
-         mOp.mInputs[1]->dataType(),
+         mOp.getInput(1)->dataType(),
          mOp.getOutput(0)->dataType()});
 
     // Call kernel
@@ -105,17 +44,12 @@ void Aidge::MatMulImpl_cpu::forward()
     // }
     // else
     kernelFunc(
-        mOp.getStaticAttributes(),
+        dynamic_cast<const MatMul_Op&>(mOp).getStaticAttributes(),
         mOp.getInput(0)->dims()[0],
         mOp.getInput(0)->sizeM1(),
         mOp.getInput(0)->getImpl()->rawPtr(),
-        mOp.mInputs[1]->getImpl()->rawPtr(),
+        mOp.getInput(1)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
 
 
 }
-
-void Aidge::MatMulImpl_cpu::backward()
-{
-    printf("Not implemented yet.\n");
-}
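
[Editor's note: the second recurring substitution is visible above: direct member access (mOp.mInputs[1]) becomes the accessor mOp.getInput(1). Only the accessor is reachable through the generic operator reference, and it keeps the null-pointer assertions meaningful. A short sketch of the resulting shape of an input access:

    // Accessor-based input handling, as now used in forward():
    assert(mOp.getInput(1) && "missing input #1");       // validate the slot first
    const auto input1 = mOp.getInput(1);                 // shared pointer to the tensor
    const void* raw   = input1->getImpl()->rawPtr();     // backend-specific buffer

This presumably also removes the visibility coupling between the implementation and the concrete operator class that the raw mInputs member required.]
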
diff --git a/src/operator/MaxPoolingImpl.cpp b/src/operator/MaxPoolingImpl.cpp
index 4604c010..c5127c1e 100644
--- a/src/operator/MaxPoolingImpl.cpp
+++ b/src/operator/MaxPoolingImpl.cpp
@@ -20,49 +20,12 @@
 #include "aidge/backend/cpu/operator/MaxPoolingImpl.hpp"
 #include "aidge/backend/cpu/operator/MaxPoolingImpl_forward_kernels.hpp"
 
-Aidge::NbElts_t Aidge::MaxPoolingImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    assert(mOp.getInput(inputIdx) && "requires valid input");
-
-    // Requires the whole tensors
-    const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(inputIdx))->dims();
-
-    return std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
-}
-
 Aidge::NbElts_t Aidge::MaxPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputIdx*/) const {
-    // for the direct convolution algorithm, convolutions can be in-place, if
-    // there is no padding!
+    // this implementation can run in-place, so no protected memory is required
     return 0;
 }
 
-Aidge::NbElts_t Aidge::MaxPoolingImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                                                           const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
-    // Requires the whole tensors, regardless of available data on inputs
-    assert(outputIdx == 0 && "operator has only one output");
-    (void) outputIdx;
-
-    const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
-    return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::MaxPoolingImpl2D_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
-    assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
-    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
-}
-
-Aidge::NbElts_t Aidge::MaxPoolingImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
-    assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
-    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
-}
-void Aidge::MaxPoolingImpl2D_cpu::updateConsummerProducer(){
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
-                                                                                           // amount for a forward pass
-    mNbProducedData[0] += getRequiredMemory(0, {});
-}
 void Aidge::MaxPoolingImpl2D_cpu::forward() {
-    // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
 
     // Find the correct kernel type
@@ -70,11 +33,8 @@ void Aidge::MaxPoolingImpl2D_cpu::forward() {
             Registrar<MaxPoolingImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()});
 
     // Call kernel
-    kernelFunc(mOp.getStaticAttributes(),
+    kernelFunc(dynamic_cast<const MaxPooling_Op<2>&>(mOp).getStaticAttributes(),
                mOp.getInput(0)->dims<4>(),
                mOp.getInput(0)->getImpl()->rawPtr(),
                mOp.getOutput(0)->getImpl()->rawPtr());
-
 }
-
-void Aidge::MaxPoolingImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
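
[Editor's note: kernel selection is untouched by the patch. forward() builds a key from the runtime input/output data types and asks the Registrar for a matching kernel; the registration side lives in the *_forward_kernels.hpp headers included at the top of each file. A hedged sketch of what one registration plausibly looks like; the identifier names here are assumptions:

    // Hypothetical registration for a float32 -> float32 kernel. An
    // unregistered {input, output} dtype pair makes Registrar::create fail
    // at lookup time instead of inside the kernel.
    static Registrar<MaxPoolingImpl2DForward_cpu> registrarMaxPool2DFloat(
        {DataType::Float32, DataType::Float32},
        Aidge::MaxPoolingImpl2D_cpu_forward_kernel<float, float>);
]
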
diff --git a/src/operator/PadImpl.cpp b/src/operator/PadImpl.cpp
index a7d2fb4e..7c2af9e2 100644
--- a/src/operator/PadImpl.cpp
+++ b/src/operator/PadImpl.cpp
@@ -21,61 +21,18 @@
 #include "aidge/backend/cpu/operator/PadImpl.hpp"
 #include "aidge/backend/cpu/operator/PadImpl_forward_kernels.hpp"
 
-Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    assert(inputIdx == 0 && "operator has only one input");
-    (void) inputIdx;
-
-    // Requires the whole tensors
-    const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims();
-
-    return std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
-}
-
 Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbRequiredProtected(IOIndex_t inputIdx) const {
     assert(inputIdx == 0 && "operator has only one input");
     (void) inputIdx;
 
     // Padding cannot be in-place!
     // We must ensure that we do not overwrite data that has not been consumed yet.
-    const auto &inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims();
-    const size_t inputSize = std::accumulate(inputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
-    const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
-    const size_t outputSize = std::accumulate(outputDims.begin(), inputDims.end(), Aidge::NbElts_t(1), std::multiplies<NbElts_t>());
-
+    const auto inputSize = std::static_pointer_cast<Tensor>(mOp.getInput(0))->size();
+    const auto outputSize = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->size();
     return (outputSize - inputSize);
 }
 
-Aidge::NbElts_t Aidge::PadImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                                                         const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    // Requires the whole tensors, regardless of available data on inputs
-    assert(outputIdx == 0 && "operator has only one output");
-    (void) outputIdx;
-
-    const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
-    return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
-    assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
-    return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
-}
-
-Aidge::NbElts_t Aidge::PadImpl2D_cpu::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
-    assert((outputIdx == 0) && (static_cast<std::size_t>(outputIdx) < mNbProducedData.size()));
-    return mNbProducedData[static_cast<std::size_t>(outputIdx)];
-}
-
-void Aidge::PadImpl2D_cpu::updateConsummerProducer(){
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
-                                                                   // amount for a forward pass
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
-}
-
 void Aidge::PadImpl2D_cpu::forward() {
-    // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
 
     // Find the correct kernel type
@@ -83,10 +40,6 @@ void Aidge::PadImpl2D_cpu::forward() {
             Registrar<PadImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()});
 
     // Call kernel
-    kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
+    kernelFunc(dynamic_cast<const Pad_Op<2>&>(mOp).getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
                mOp.getInput(0)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
-
-
 }
-
-void Aidge::PadImpl2D_cpu::backward() { printf("Not implemented yet.\n"); }
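
[Editor's note: Pad is the one operator here that genuinely cannot run in place, since the output is strictly larger than the input, so getNbRequiredProtected() must reserve the surplus. Switching to Tensor::size() also quietly fixes a latent bug in the deleted code, which accumulated from outputDims.begin() to inputDims.end(). A worked example with illustrative dimensions, not taken from the patch:

    // Illustrative only: a 1x3x32x32 input padded by 1 on each spatial border.
    // input  size = 1 * 3 * 32 * 32 = 3072 elements
    // output size = 1 * 3 * 34 * 34 = 3468 elements
    // protected   = 3468 - 3072     =  396 elements that must not be
    //                                  overwritten before being consumed
    const auto inputSize  = std::static_pointer_cast<Tensor>(mOp.getInput(0))->size();
    const auto outputSize = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->size();
    const auto nbProtected = outputSize - inputSize;   // 396 in this example
]
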
diff --git a/src/operator/ProducerImpl.cpp b/src/operator/ProducerImpl.cpp
deleted file mode 100644
index 664f3745..00000000
--- a/src/operator/ProducerImpl.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include <cassert>
-#include <numeric> // std::accumulate
-#include <vector>
-
-#include "aidge/data/Tensor.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/utils/Types.h"
-
-#include "aidge/backend/cpu/operator/ProducerImpl.hpp"
-
-
-std::size_t Aidge::ProducerImpl_cpu::getNbRequiredData(
-    Aidge::IOIndex_t /*inputIdx*/) const
-{
-    return 0;
-}
-
-
-Aidge::DimSize_t Aidge::ProducerImpl_cpu::getNbConsumedData(
-    Aidge::IOIndex_t /*inputIdx*/) const
-{
-    return 0;
-}
-
-
-std::size_t Aidge::ProducerImpl_cpu::getNbRequiredProtected(
-    Aidge::IOIndex_t /*inputIdx*/) const
-{
-    return 0;
-}
-
-
-std::size_t Aidge::ProducerImpl_cpu::getRequiredMemory(
-    const IOIndex_t outputIdx, const std::vector<DimSize_t> &/*inputsSize*/) const
-{
-    // Requires the whole tensors, regardless of available data on inputs
-    assert(outputIdx == 0 && "operator has only one output");
-    (void) outputIdx;
-
-    const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
-    return std::accumulate(
-        outputDims.begin(),
-        outputDims.end(),
-        NbElts_t(1),
-        std::multiplies<NbElts_t>());
-}
-
-Aidge::DimSize_t Aidge::ProducerImpl_cpu::getNbProducedData(
-    Aidge::IOIndex_t /*outputIdx*/) const
-{
-    return getRequiredMemory(0, {});
-}
-void Aidge::ProducerImpl_cpu::updateConsummerProducer(){
-}
-
-void Aidge::ProducerImpl_cpu::forward()
-{
-}
-
-
-void Aidge::ProducerImpl_cpu::backward()
-{
-    printf("Not implemented yet.\n");
-}
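
[Editor's note: ProducerImpl.cpp disappears entirely, and for good reason: every override in it was trivial (zero data requirements, an empty forward), i.e. exactly what a sensible base-class default provides. The class presumably survives header-only; a minimal sketch of what remains, stated under that assumption since the header itself is not shown in this section:

    // Hedged sketch; needs <memory>. The actual ProducerImpl.hpp may differ.
    class ProducerImpl_cpu : public OperatorImpl {
    public:
        ProducerImpl_cpu(const Producer_Op& op) : OperatorImpl(op) {}

        static std::unique_ptr<ProducerImpl_cpu> create(const Producer_Op& op) {
            return std::make_unique<ProducerImpl_cpu>(op);
        }

        void forward() override {}  // a Producer holds data; there is nothing to compute
    };
]
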
diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp
index cea50bc1..647898d3 100644
--- a/src/operator/ReLUImpl.cpp
+++ b/src/operator/ReLUImpl.cpp
@@ -21,42 +21,12 @@
 #include "aidge/backend/cpu/operator/ReLUImpl.hpp"
 #include "aidge/backend/cpu/operator/ReLUImpl_forward_kernels.hpp"
 
-// FIXME: replace whole Tensor with minimum needed data quantity
-Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*inputIdx*/) const {
-    assert(mOp.getInput(0) && "requires valid input");
-
-    // Requires the whole tensors
-    const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims();
-
-    return std::accumulate(inputDims.begin(), inputDims.end(),
-                        static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
-}
-
 Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
+    // this implementation can run in-place, so no protected memory is required
     return 0;
 }
 
-Aidge::NbElts_t Aidge::ReLUImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
-    return std::accumulate(outputDims.begin(), outputDims.end(),
-                        static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*inputIdx*/) const {
-    return mNbConsumedData[0];
-}
-
-Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
-    return mNbProducedData[0];
-}
-void Aidge::ReLUImpl_cpu::updateConsummerProducer(){
-    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
-}
 void Aidge::ReLUImpl_cpu::forward() {
-    // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
 
     // Find the correct kernel type
@@ -68,9 +38,4 @@ void Aidge::ReLUImpl_cpu::forward() {
     kernelFunc(mOp.getInput(0)->size(),
         mOp.getInput(0)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
-
-}
-
-void Aidge::ReLUImpl_cpu::backward() {
-    printf("Not implemented yet.\n");
 }
diff --git a/src/operator/ScalingImpl.cpp b/src/operator/ScalingImpl.cpp
index 84cd6ee3..39c1326d 100644
--- a/src/operator/ScalingImpl.cpp
+++ b/src/operator/ScalingImpl.cpp
@@ -20,46 +20,12 @@
 #include "aidge/utils/Types.h"
 #include <vector>
 
-// FIXME: replace whole Tensor with minimum needed data quantity
-Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*inputIdx*/) const {
-    assert(mOp.getInput(0) && "requires valid input");
-
-    // Requires the whole tensors
-    const auto& inputDims = mOp.getInput(0)->dims();
-
-    return std::accumulate(inputDims.begin(), inputDims.end(),
-                        static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
-}
-
 Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
+    // this implementation can run in-place, so no protected memory is required
     return 0;
 }
 
-Aidge::NbElts_t Aidge::ScalingImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t> &inputsSize) const {
-    (void) outputIdx;
-    (void) inputsSize;
-    const auto& outputDims = mOp.getOutput(0)->dims();
-    return std::accumulate(outputDims.begin(), outputDims.end(),
-                        static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*inputIdx*/) const {
-    return mNbConsumedData[0];
-}
-
-Aidge::NbElts_t Aidge::ScalingImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
-    return mNbProducedData[0];
-}
-
-void Aidge::ScalingImpl_cpu::updateConsummerProducer(){
-    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
-}
-
 void Aidge::ScalingImpl_cpu::forward() {
-    // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
 
     // Find the correct kernel type
@@ -68,17 +34,8 @@ void Aidge::ScalingImpl_cpu::forward() {
         mOp.getOutput(0)->dataType()});
 
     // Call kernel
-    kernelFunc(mOp.getStaticAttributes(),
+    kernelFunc(dynamic_cast<const Scaling_Op&>(mOp).getStaticAttributes(),
         std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
         mOp.getInput(0)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
-
-
-    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
-}
-
-void Aidge::ScalingImpl_cpu::backward() {
-    printf("Not implemented yet.\n");
 }
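
[Editor's note: Scaling (and Softmax below) had a second problem besides the boilerplate: forward() itself incremented mNbConsumedData/mNbProducedData, duplicating updateConsummerProducer() and double-counting on every pass. Both call sites disappear here. For reference, a sketch of the presumed base-class default that now performs this bookkeeping once, reconstructed from the deleted per-operator bodies (the "Consummer" spelling is the identifier's actual spelling):

    // Assumed to be the new OperatorImpl default, not copied from the patch.
    void Aidge::OperatorImpl::updateConsummerProducer() {
        for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
            // each input is consumed by the minimum amount for a forward pass
            mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));
        mNbProducedData[0] += getRequiredMemory(0, {});
    }
]
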
diff --git a/src/operator/SoftmaxImpl.cpp b/src/operator/SoftmaxImpl.cpp
index 03e8f930..45b455a3 100644
--- a/src/operator/SoftmaxImpl.cpp
+++ b/src/operator/SoftmaxImpl.cpp
@@ -21,45 +21,12 @@
 #include "aidge/backend/cpu/operator/SoftmaxImpl.hpp"
 #include "aidge/backend/cpu/operator/SoftmaxImpl_forward_kernels.hpp"
 
-// FIXME: replace whole Tensor with minimum needed data quantity
-Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*inputIdx*/) const {
-    assert(mOp.getInput(0) && "requires valid input");
-
-    // Requires the whole tensors
-    const auto& inputDims = std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims();
-
-    return std::accumulate(inputDims.begin(), inputDims.end(),
-                        static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
-}
-
 Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
-    // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
+    // this implementation can run in-place, so no protected memory is required
     return 0;
 }
 
-Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
-    return std::accumulate(outputDims.begin(), outputDims.end(),
-                        static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
-}
-
-Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbConsumedData(Aidge::IOIndex_t /*inputIdx*/) const {
-    return mNbConsumedData[0];
-}
-
-Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbProducedData(Aidge::IOIndex_t /*outputIdx*/) const {
-    return mNbProducedData[0];
-}
-void Aidge::SoftmaxImpl_cpu::updateConsummerProducer(){
-    // Update producer-consumer data
-    for (std::size_t inputIdx = 0; inputIdx < mNbConsumedData.size(); ++inputIdx)
-        mNbConsumedData[inputIdx] += getNbRequiredData(static_cast<IOIndex_t>(inputIdx));  // each input is consumed by the minimum
-                                                                   // amount for a forward pass
-
-    mNbProducedData[0] += getRequiredMemory(0, {});
-}
 void Aidge::SoftmaxImpl_cpu::forward() {
-    // FIXME: uncomment the following code once memory handling will work
     assert(mOp.getInput(0) && "missing input #0");
     assert(mOp.getInput(0)->nbDims()>1);
 
@@ -77,13 +44,4 @@ void Aidge::SoftmaxImpl_cpu::forward() {
                featureSize,
                mOp.getInput(0)->getImpl()->rawPtr(),
                mOp.getOutput(0)->getImpl()->rawPtr());
-
-
-    mNbConsumedData[0]+= getNbRequiredData(0); // each input is consumed by the minimum amount for a forward pass
-
-    mNbProducedData[0]+= getRequiredMemory(0, {});
-}
-
-void Aidge::SoftmaxImpl_cpu::backward() {
-    printf("Not implemented yet.\n");
 }
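
[Editor's note: Softmax keeps its rank check (nbDims() > 1) because the kernel is invoked with a separate featureSize; the computation of that value sits in the unshown part of forward(). A plausible decomposition for a channel-wise softmax, stated here purely as an assumption:

    // Assumption: axis-1 (channel) softmax over an N-D tensor; the actual
    // split used by the kernel is not visible in this hunk.
    const auto& dims = mOp.getInput(0)->dims();
    const std::size_t batchSize   = dims[0];
    const std::size_t channelSize = dims[1];
    const std::size_t featureSize = mOp.getInput(0)->size() / (batchSize * channelSize);
]
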
-- 
GitLab