diff --git a/include/operator/AddImpl.hpp b/include/operator/AddImpl.hpp
index a089328c1985454b121115e5ce0d2cb331c6ee6b..06175682b82b2dc1eda55ca7b73840dca305f8c2 100644
--- a/include/operator/AddImpl.hpp
+++ b/include/operator/AddImpl.hpp
@@ -66,7 +66,7 @@ class AddImpl_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final {
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final {
         assert(mOp.getInput(inputIdx) && "requires valid input");
 
         // Requires the whole tensors
@@ -74,12 +74,12 @@ class AddImpl_cpu : public OperatorImpl {
         return std::accumulate(inputDims.begin(), inputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
     }
 
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final {
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final {
         // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
         return 0;
     }
 
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final {
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final {
         // Requires the whole tensors, regardless of available data on inputs
         assert(outputIdx == 0 && "operator has only one output");
 
@@ -87,12 +87,12 @@ class AddImpl_cpu : public OperatorImpl {
         return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
     }
 
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final {
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < mNbConsumedData.size());
         return mNbConsumedData[inputIdx];
     }
 
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final {
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final {
         assert(outputIdx < mNbProducedData.size());
         return mNbProducedData[outputIdx];
     }
@@ -119,16 +119,16 @@ class AddImpl_cpu<1> : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t /*inputIdx*/) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t /*inputIdx*/) const override final;
 
-    NbElts_t getNbRequiredProtected(IOIndex_t /*inputIdx*/) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
 
-    NbElts_t getRequiredMemory(IOIndex_t /*outputIdx*/,
-                               const std::vector<DimSize_t>& /*inputsSize*/) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx,
+                               __attribute__((unused)) const std::vector<DimSize_t> &inputsSize) const override final;
 
-    NbElts_t getNbConsumedData(IOIndex_t /*inputIdx*/) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override final;
 
-    NbElts_t getNbProducedData(IOIndex_t /*outputIdx*/) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
 
     void forward();
 
@@ -150,16 +150,16 @@ class AddImpl_cpu<2> : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
 
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
 
-    NbElts_t getRequiredMemory(IOIndex_t /*outputIdx*/,
-                               const std::vector<DimSize_t>& /*inputsSize*/) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx,
+                               __attribute__((unused)) const std::vector<DimSize_t>& inputsSize) const override final;
 
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
 
-    NbElts_t getNbProducedData(IOIndex_t /*outputIdx*/) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
 
     void forward();
 
@@ -181,15 +181,15 @@ class AddImpl_cpu<3> : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
 
-    NbElts_t getNbRequiredProtected(IOIndex_t /*inputIdx*/) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
 
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
 
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
 
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/operator/AvgPoolingImpl.hpp b/include/operator/AvgPoolingImpl.hpp
index a107bd83e64504a7640594735c7607d482c22eef..01192f7c0aab39ecc50458a6f7c2213dd4e71ffe 100644
--- a/include/operator/AvgPoolingImpl.hpp
+++ b/include/operator/AvgPoolingImpl.hpp
@@ -49,11 +49,11 @@ class AvgPoolingImpl2D_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/operator/BatchNormImpl.hpp b/include/operator/BatchNormImpl.hpp
index 61dafaf4ff6084e55db8f505ef3eebf478b62211..57f4aee207ec92b1eeb13d9c917cbc65c45ae88c 100644
--- a/include/operator/BatchNormImpl.hpp
+++ b/include/operator/BatchNormImpl.hpp
@@ -64,11 +64,11 @@ class BatchNormImpl2D_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/operator/ConvDepthWiseImpl.hpp b/include/operator/ConvDepthWiseImpl.hpp
index 4557b37ab6ebc7bf9e9e7539922ce057d66c148e..b1a2004df46e9f2a5e00103dc1628446416c8109 100644
--- a/include/operator/ConvDepthWiseImpl.hpp
+++ b/include/operator/ConvDepthWiseImpl.hpp
@@ -51,11 +51,11 @@ class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/operator/ConvImpl.hpp b/include/operator/ConvImpl.hpp
index 9db557b760f5dae533ac74bc9fa3b2cd1f1d7ee4..81768f6889c7cd1ab10b47eb704bfddec801b899 100644
--- a/include/operator/ConvImpl.hpp
+++ b/include/operator/ConvImpl.hpp
@@ -51,11 +51,11 @@ class ConvImpl2D_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/operator/FCImpl.hpp b/include/operator/FCImpl.hpp
index f06861ef07795f14dd23a951155ed55bf284b495..d331b79ed0211691ed2728e278894e79c207c80b 100644
--- a/include/operator/FCImpl.hpp
+++ b/include/operator/FCImpl.hpp
@@ -45,11 +45,11 @@ class FCImpl_cpu : public OperatorImpl {
     static std::unique_ptr<FCImpl_cpu> create(const FC_Op &op) { return std::make_unique<FCImpl_cpu>(op); }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/operator/LeakyReLUImpl.hpp b/include/operator/LeakyReLUImpl.hpp
index 7b71f4e7c89dc34c376ee6e56b81623ee7bd71eb..115926e9114115358b31387f932c79bbc7c67b94 100644
--- a/include/operator/LeakyReLUImpl.hpp
+++ b/include/operator/LeakyReLUImpl.hpp
@@ -44,11 +44,11 @@ class LeakyReLUImpl_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/operator/ProducerImpl.hpp b/include/operator/ProducerImpl.hpp
index 23be4b6da6bde301807969ec4cec1df2be4c23c1..6f424c8f83383be5e887c3f010f4f4b52834d9dc 100644
--- a/include/operator/ProducerImpl.hpp
+++ b/include/operator/ProducerImpl.hpp
@@ -32,11 +32,11 @@ class ProducerImpl_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/operator/ReLUImpl.hpp b/include/operator/ReLUImpl.hpp
index 714a44a26f7820598d686e0e0ac64be517898427..5797b9a932228837ba312ad920dc7c8cc14de7fb 100644
--- a/include/operator/ReLUImpl.hpp
+++ b/include/operator/ReLUImpl.hpp
@@ -44,11 +44,11 @@ class ReLUImpl_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/operator/SoftmaxImpl.hpp b/include/operator/SoftmaxImpl.hpp
index eb4062b67de86331a56580ee5164e2f6208bb814..c6f881e7812198818573d633b2442841e71e5b4b 100644
--- a/include/operator/SoftmaxImpl.hpp
+++ b/include/operator/SoftmaxImpl.hpp
@@ -44,11 +44,11 @@ class SoftmaxImpl_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index 5b84bc8e9f0d8509774b99b27d0b06c97053b422..1dd998e956758a5a8414625f7c71d924ed43b8dd 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -31,12 +31,12 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbRequiredData(Aidge::IOIndex_t /*inpu
     return static_cast<int>(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size());
 }
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
     return 0;
 }
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getRequiredMemory(Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getRequiredMemory(const Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
     // Requires the whole tensors, regardless of available data on inputs
     return std::static_pointer_cast<Tensor>(mOp.getOutput(0))->size();
 }
@@ -80,7 +80,7 @@ void Aidge::AddImpl_cpu<1>::backward() {
 //////////////////////////////////
 
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     assert(mOp.getInput(inputIdx) && "requires valid input");
 
     // Requires the whole tensors
@@ -90,12 +90,12 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredData(Aidge::IOIndex_t inputI
                             NbElts_t(1), std::multiplies<NbElts_t>());
 }
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
     return 0;
 }
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getRequiredMemory(Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getRequiredMemory(__attribute__((unused)) const Aidge::IOIndex_t outputIdx, __attribute__((unused)) const std::vector<Aidge::DimSize_t>& inputsSize) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
 
@@ -147,7 +147,7 @@ void Aidge::AddImpl_cpu<2>::backward() {
 //////////////////////////////////
 
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     assert(mOp.getInput(inputIdx) && "requires valid input");
 
     // Requires the whole tensors
@@ -157,12 +157,12 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredData(Aidge::IOIndex_t inputI
                             Aidge::NbElts_t(1), std::multiplies<Aidge::NbElts_t>());
 }
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
     return 0;
 }
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getRequiredMemory(Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
 
diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp
index dfa6357e1207e1c844b09c68665cc734e3f35177..fafefc4b661b1669ee6ff6793b0117bb1c08547a 100644
--- a/src/operator/AvgPoolingImpl.cpp
+++ b/src/operator/AvgPoolingImpl.cpp
@@ -20,7 +20,7 @@
 #include "operator/AvgPooling.hpp"
 #include "utils/Types.h"
 
-Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
+Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     assert(mOp.getInput(inputIdx) && "requires valid input");
 
     // Requires the whole tensors
@@ -35,7 +35,7 @@ Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*
     return 0;
 }
 
-Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getRequiredMemory(Aidge::IOIndex_t outputIdx,
+Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                            const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
diff --git a/src/operator/BatchNormImpl.cpp b/src/operator/BatchNormImpl.cpp
index d94f43892e45f5e9cd6eb4ed2982514d0abc037c..bdbb04fd0b34451ca7c0ec90ab3b21ddf73f712c 100644
--- a/src/operator/BatchNormImpl.cpp
+++ b/src/operator/BatchNormImpl.cpp
@@ -20,7 +20,7 @@
 #include "operator/BatchNorm.hpp"
 #include "utils/Types.h"
 
-Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
+Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     assert(mOp.getInput(inputIdx) && "requires valid input");
 
     // Requires the whole tensors
@@ -35,8 +35,8 @@ Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*i
     return 0;
 }
 
-Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getRequiredMemory(Aidge::IOIndex_t outputIdx,
-                                                           const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                              __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
 
diff --git a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp
index ff5bf1e9097fc1a560cfa6ae84931420a905a327..667a45c2232f6bac11f4290fc0a016626085873d 100644
--- a/src/operator/ConvDepthWiseImpl.cpp
+++ b/src/operator/ConvDepthWiseImpl.cpp
@@ -21,7 +21,7 @@
 #include "operator/ConvDepthWise.hpp"
 #include "utils/Types.h"
 
-Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
+Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     assert(mOp.getInput(inputIdx) && "requires valid input");
 
     // Requires the whole tensors
@@ -36,8 +36,8 @@ Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredProtected(IOIndex_t
     return 0;
 }
 
-Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getRequiredMemory(Aidge::IOIndex_t outputIdx,
-                                                           const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                           __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
 
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index 213f0a360802a68e082338b2d3b70cec2b4c83d2..21f8db4a835cfea5ca6759a43bc37a780cfe80c4 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -21,7 +21,7 @@
 #include "operator/Conv.hpp"
 #include "utils/Types.h"
 
-Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
+Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     assert(mOp.getInput(inputIdx) && "requires valid input");
 
     // Requires the whole tensors
@@ -36,8 +36,8 @@ Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputI
     return 0;
 }
 
-Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getRequiredMemory(Aidge::IOIndex_t outputIdx,
-                                                           const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                        __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
 
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index 58a7f3cef86fd464957a7182df91c9a72c5328a6..86ee79c66188a845d4cfdbb60bf67b9b42ede499 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -20,7 +20,7 @@
 #include "operator/FCImpl_forward_kernels.hpp"
 #include "utils/Types.h"
 
-Aidge::NbElts_t Aidge::FCImpl_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx) const
+Aidge::NbElts_t Aidge::FCImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const
 {
     assert(mOp.getInput(inputIdx) && "requires valid input");
 
@@ -36,7 +36,7 @@ Aidge::NbElts_t Aidge::FCImpl_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx)
 }
 
 Aidge::NbElts_t
-    Aidge::FCImpl_cpu::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const
+    Aidge::FCImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const
 {
     // for the direct convolution algorithm, convolutions can be in-place, if
     // there is no padding!
@@ -44,7 +44,7 @@ Aidge::NbElts_t
 }
 
 Aidge::NbElts_t Aidge::FCImpl_cpu::getRequiredMemory(
-    IOIndex_t outputIdx, const std::vector<DimSize_t> & /*inputsSize*/) const
+    const IOIndex_t outputIdx, __attribute__((unused)) const std::vector<DimSize_t> &inputsSize) const
 {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp
index f13388351a54b4ffeab59880ca5c06ea68b46c8a..c05af23ae75d166761fa75a9b94ed32f2f9c9a6b 100644
--- a/src/operator/LeakyReLUImpl.cpp
+++ b/src/operator/LeakyReLUImpl.cpp
@@ -33,12 +33,12 @@ Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*i
                         static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
 }
 
-Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
     return 0;
 }
 
-Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getRequiredMemory(Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getRequiredMemory(__attribute__((unused)) const Aidge::IOIndex_t outputIdx, __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     const auto& outputDims = mOp.getOutput(0)->dims();
     return std::accumulate(outputDims.begin(), outputDims.end(),
                         static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
diff --git a/src/operator/ProducerImpl.cpp b/src/operator/ProducerImpl.cpp
index cbdd05547bea971f825651707ff0bdc5a7a0cc62..6f067dbed6a9447da867dca681c5a9e86a3c3700 100644
--- a/src/operator/ProducerImpl.cpp
+++ b/src/operator/ProducerImpl.cpp
@@ -42,7 +42,7 @@ std::size_t Aidge::ProducerImpl_cpu::getNbRequiredProtected(
 
 
 std::size_t Aidge::ProducerImpl_cpu::getRequiredMemory(
-    IOIndex_t outputIdx, const std::vector<DimSize_t> & /*inputsSize*/) const
+    const IOIndex_t outputIdx, __attribute__((unused)) const std::vector<DimSize_t> &inputsSize) const
 {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp
index 4d8472c03ed5ee1b151b46b948b92853afcf6037..1512cef9e3a8b7857331fc3224bc48a50966e3ae 100644
--- a/src/operator/ReLUImpl.cpp
+++ b/src/operator/ReLUImpl.cpp
@@ -33,12 +33,12 @@ Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*inputI
                         static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
 }
 
-Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
     return 0;
 }
 
-Aidge::NbElts_t Aidge::ReLUImpl_cpu::getRequiredMemory(Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::ReLUImpl_cpu::getRequiredMemory(__attribute__((unused)) const Aidge::IOIndex_t outputIdx, __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
     return std::accumulate(outputDims.begin(), outputDims.end(),
                         static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
diff --git a/src/operator/SoftmaxImpl.cpp b/src/operator/SoftmaxImpl.cpp
index f5f0bb73d448b7f367665c18fd05d24c03d296e0..79b868244ca7fd97a85b40c3afbb6ff8b41f243d 100644
--- a/src/operator/SoftmaxImpl.cpp
+++ b/src/operator/SoftmaxImpl.cpp
@@ -33,12 +33,12 @@ Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*inp
                         static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
 }
 
-Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
     return 0;
 }
 
-Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getRequiredMemory(Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getRequiredMemory(__attribute__((unused)) const Aidge::IOIndex_t outputIdx, __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
     return std::accumulate(outputDims.begin(), outputDims.end(),
                         static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());