diff --git a/include/aidge/operator/AddImpl.hpp b/include/aidge/operator/AddImpl.hpp
index b5933aad54b1b91f4e493f50ad8585de083b6966..8bd954c0d1dba40fe666e5aad7be47a65033e607 100644
--- a/include/aidge/operator/AddImpl.hpp
+++ b/include/aidge/operator/AddImpl.hpp
@@ -66,7 +66,7 @@ class AddImpl_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final {
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final {
         assert(mOp.getInput(inputIdx) && "requires valid input");
 
         // Requires the whole tensors
@@ -74,12 +74,12 @@ class AddImpl_cpu : public OperatorImpl {
         return std::accumulate(inputDims.begin(), inputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
     }
 
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final {
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final {
         // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
         return 0;
     }
 
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final {
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final {
         // Requires the whole tensors, regardless of available data on inputs
         assert(outputIdx == 0 && "operator has only one output");
 
@@ -87,12 +87,12 @@ class AddImpl_cpu : public OperatorImpl {
         return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
     }
 
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final {
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final {
         assert(inputIdx < mNbConsumedData.size());
         return mNbConsumedData[inputIdx];
     }
 
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final {
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final {
         assert(outputIdx < mNbProducedData.size());
         return mNbProducedData[outputIdx];
     }
@@ -119,16 +119,16 @@ class AddImpl_cpu<1> : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t /*inputIdx*/) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t /*inputIdx*/) const override final;
 
-    NbElts_t getNbRequiredProtected(IOIndex_t /*inputIdx*/) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
 
-    NbElts_t getRequiredMemory(IOIndex_t /*outputIdx*/,
-                               const std::vector<DimSize_t>& /*inputsSize*/) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx,
+                               __attribute__((unused)) const std::vector<DimSize_t> &inputsSize) const override final;
 
-    NbElts_t getNbConsumedData(IOIndex_t /*inputIdx*/) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override final;
 
-    NbElts_t getNbProducedData(IOIndex_t /*outputIdx*/) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
 
     void forward();
 
@@ -150,16 +150,16 @@ class AddImpl_cpu<2> : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
 
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
 
-    NbElts_t getRequiredMemory(IOIndex_t /*outputIdx*/,
-                               const std::vector<DimSize_t>& /*inputsSize*/) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx,
+                               __attribute__((unused)) const std::vector<DimSize_t>& inputsSize) const override final;
 
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
 
-    NbElts_t getNbProducedData(IOIndex_t /*outputIdx*/) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
 
     void forward();
 
@@ -181,15 +181,15 @@ class AddImpl_cpu<3> : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
 
-    NbElts_t getNbRequiredProtected(IOIndex_t /*inputIdx*/) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
 
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
 
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
 
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/aidge/operator/AvgPoolingImpl.hpp b/include/aidge/operator/AvgPoolingImpl.hpp
index 58faa8b2e29d315aced712142016fb6f68782c9c..5cde8bbd7b482a70b234f988cb3f54178a2c50ee 100644
--- a/include/aidge/operator/AvgPoolingImpl.hpp
+++ b/include/aidge/operator/AvgPoolingImpl.hpp
@@ -49,11 +49,11 @@ class AvgPoolingImpl2D_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/aidge/operator/BatchNormImpl.hpp b/include/aidge/operator/BatchNormImpl.hpp
index 80ea54aeea9d75b8bc32d4a9ebbf0bd9daf16d83..37d644f00f4a53b0f0b5c64928ec5c77e719ceb5 100644
--- a/include/aidge/operator/BatchNormImpl.hpp
+++ b/include/aidge/operator/BatchNormImpl.hpp
@@ -64,11 +64,11 @@ class BatchNormImpl2D_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/aidge/operator/ConvDepthWiseImpl.hpp b/include/aidge/operator/ConvDepthWiseImpl.hpp
index 0f219b00e4cf627c3f9c783d2f7be254f12637fa..e9822ffa75885bc7f11090a29b16bb6e9b38e454 100644
--- a/include/aidge/operator/ConvDepthWiseImpl.hpp
+++ b/include/aidge/operator/ConvDepthWiseImpl.hpp
@@ -51,11 +51,11 @@ class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/aidge/operator/ConvImpl.hpp b/include/aidge/operator/ConvImpl.hpp
index d40cc818c26fba7cb4d7b8729230c5994a2ec2d0..d3fea6ac3487b15ad0e2b902f65ae7d1bf8dc283 100644
--- a/include/aidge/operator/ConvImpl.hpp
+++ b/include/aidge/operator/ConvImpl.hpp
@@ -51,11 +51,11 @@ class ConvImpl2D_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/aidge/operator/FCImpl.hpp b/include/aidge/operator/FCImpl.hpp
index eea2009d04681b76a1894c380631c4582dd8cf7f..a0135d96232306d975284eb1240b03c1326a2f0d 100644
--- a/include/aidge/operator/FCImpl.hpp
+++ b/include/aidge/operator/FCImpl.hpp
@@ -45,11 +45,11 @@ class FCImpl_cpu : public OperatorImpl {
     static std::unique_ptr<FCImpl_cpu> create(const FC_Op &op) { return std::make_unique<FCImpl_cpu>(op); }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/aidge/operator/LeakyReLUImpl.hpp b/include/aidge/operator/LeakyReLUImpl.hpp
index e810fd7e23ebdf914cd52120aa28eebb205a2237..a04b810be35cb76bec839aeb1d3ab94ee967e906 100644
--- a/include/aidge/operator/LeakyReLUImpl.hpp
+++ b/include/aidge/operator/LeakyReLUImpl.hpp
@@ -44,11 +44,11 @@ class LeakyReLUImpl_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/aidge/operator/ProducerImpl.hpp b/include/aidge/operator/ProducerImpl.hpp
index f2764b34b774c35cc26503bf60a650f4a9e67817..9cce69c6bc1d71277d88df4a8f27698c7db2fa66 100644
--- a/include/aidge/operator/ProducerImpl.hpp
+++ b/include/aidge/operator/ProducerImpl.hpp
@@ -32,11 +32,11 @@ class ProducerImpl_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/aidge/operator/ReLUImpl.hpp b/include/aidge/operator/ReLUImpl.hpp
index d62092a5df13c51c24c4b760b3ac46c76bc66f2b..eb0b61b2d7d61554955e01735e755aaea67a7eec 100644
--- a/include/aidge/operator/ReLUImpl.hpp
+++ b/include/aidge/operator/ReLUImpl.hpp
@@ -44,11 +44,11 @@ class ReLUImpl_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/include/aidge/operator/SoftmaxImpl.hpp b/include/aidge/operator/SoftmaxImpl.hpp
index 76309f311707e9da7ea6cac40dd38066b90605c0..5020802c84aef4ccd77403db987c47fedf0cf8f6 100644
--- a/include/aidge/operator/SoftmaxImpl.hpp
+++ b/include/aidge/operator/SoftmaxImpl.hpp
@@ -44,11 +44,11 @@ class SoftmaxImpl_cpu : public OperatorImpl {
     }
 
    public:
-    NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
-    NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
-    NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
+    NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+    NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
+    NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
 
     void forward();
 
diff --git a/src/operator/AddImpl.cpp b/src/operator/AddImpl.cpp
index 770ed91a1f1cbcab558c1416c4b83f73d655ef17..47717cef1566512b0f0d37beb11fb3d28832436e 100644
--- a/src/operator/AddImpl.cpp
+++ b/src/operator/AddImpl.cpp
@@ -31,12 +31,12 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbRequiredData(Aidge::IOIndex_t /*inpu
     return static_cast<int>(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size());
 }
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
     return 0;
 }
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getRequiredMemory(Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getRequiredMemory(const Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
     // Requires the whole tensors, regardless of available data on inputs
     return std::static_pointer_cast<Tensor>(mOp.getOutput(0))->size();
 }
@@ -80,7 +80,7 @@ void Aidge::AddImpl_cpu<1>::backward() {
 //////////////////////////////////
 
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     assert(mOp.getInput(inputIdx) && "requires valid input");
 
     // Requires the whole tensors
@@ -90,12 +90,12 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredData(Aidge::IOIndex_t inputI
                             NbElts_t(1), std::multiplies<NbElts_t>());
 }
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
     return 0;
 }
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getRequiredMemory(Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getRequiredMemory(const Aidge::IOIndex_t outputIdx, __attribute__((unused)) const std::vector<Aidge::DimSize_t>& inputsSize) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
 
@@ -147,7 +147,7 @@ void Aidge::AddImpl_cpu<2>::backward() {
 //////////////////////////////////
 
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     assert(mOp.getInput(inputIdx) && "requires valid input");
 
     // Requires the whole tensors
@@ -157,12 +157,12 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredData(Aidge::IOIndex_t inputI
                             Aidge::NbElts_t(1), std::multiplies<Aidge::NbElts_t>());
 }
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
     return 0;
 }
 
-Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getRequiredMemory(Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
 
diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp
index d9d345b7b551abc31099e7c6f7f8e5fbbe126654..2e1e901d35f2ac8620f1c4be53413ce58e9260f9 100644
--- a/src/operator/AvgPoolingImpl.cpp
+++ b/src/operator/AvgPoolingImpl.cpp
@@ -20,7 +20,7 @@
 #include "aidge/operator/AvgPooling.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
+Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     assert(mOp.getInput(inputIdx) && "requires valid input");
 
     // Requires the whole tensors
@@ -35,7 +35,7 @@ Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*
     return 0;
 }
 
-Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getRequiredMemory(Aidge::IOIndex_t outputIdx,
+Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                            const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
diff --git a/src/operator/BatchNormImpl.cpp b/src/operator/BatchNormImpl.cpp
index ccf03f59591aadf1c3e4609f1bf8781924ac4a71..2b104f3c205bad8147b388c46592111e22da2782 100644
--- a/src/operator/BatchNormImpl.cpp
+++ b/src/operator/BatchNormImpl.cpp
@@ -20,7 +20,7 @@
 #include "aidge/operator/BatchNorm.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
+Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     assert(mOp.getInput(inputIdx) && "requires valid input");
 
     // Requires the whole tensors
@@ -35,8 +35,8 @@ Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*i
     return 0;
 }
 
-Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getRequiredMemory(Aidge::IOIndex_t outputIdx,
-                                                           const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                              __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
 
diff --git a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp
index 32ee0bc876296a8a3ddb365b1efdecf8d103bbc7..4dd687fd99bd775a67663e2e05a09b1541ec2f12 100644
--- a/src/operator/ConvDepthWiseImpl.cpp
+++ b/src/operator/ConvDepthWiseImpl.cpp
@@ -21,7 +21,7 @@
 #include "aidge/operator/ConvDepthWise.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
+Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     assert(mOp.getInput(inputIdx) && "requires valid input");
 
     // Requires the whole tensors
@@ -36,8 +36,8 @@ Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredProtected(IOIndex_t
     return 0;
 }
 
-Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getRequiredMemory(Aidge::IOIndex_t outputIdx,
-                                                           const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                           __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
 
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index e38fe612d6aedb176af60d9d563e5ac6347cb792..0c892673b049b1789ca6ab5bab8b279835d87ce9 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -21,7 +21,7 @@
 #include "aidge/operator/Conv.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
+Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     assert(mOp.getInput(inputIdx) && "requires valid input");
 
     // Requires the whole tensors
@@ -36,8 +36,8 @@ Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputI
     return 0;
 }
 
-Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getRequiredMemory(Aidge::IOIndex_t outputIdx,
-                                                           const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
+                                                        __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
 
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index d615142bd6b358e7d86539952ec03ee0915ca8d8..0f1bf6047e2e73dfbdb4ea8054cac28364735bcc 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -20,7 +20,7 @@
 #include "aidge/operator/FCImpl_forward_kernels.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::NbElts_t Aidge::FCImpl_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx) const
+Aidge::NbElts_t Aidge::FCImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const
 {
     assert(mOp.getInput(inputIdx) && "requires valid input");
 
@@ -36,7 +36,7 @@ Aidge::NbElts_t Aidge::FCImpl_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx)
 }
 
 Aidge::NbElts_t
-    Aidge::FCImpl_cpu::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const
+    Aidge::FCImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const
 {
     // for the direct convolution algorithm, convolutions can be in-place, if
     // there is no padding!
@@ -44,7 +44,7 @@ Aidge::NbElts_t
 }
 
 Aidge::NbElts_t Aidge::FCImpl_cpu::getRequiredMemory(
-    IOIndex_t outputIdx, const std::vector<DimSize_t> & /*inputsSize*/) const
+    const IOIndex_t outputIdx, __attribute__((unused)) const std::vector<DimSize_t> &inputsSize) const
 {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp
index b3a6e036f5503ded7f014cc0cc85cf7408e69c30..c7493f12773fe372c6a07767c0151ac34471b65b 100644
--- a/src/operator/LeakyReLUImpl.cpp
+++ b/src/operator/LeakyReLUImpl.cpp
@@ -33,12 +33,12 @@ Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*i
                         static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
 }
 
-Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
     return 0;
 }
 
-Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getRequiredMemory(Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getRequiredMemory(__attribute__((unused)) const Aidge::IOIndex_t outputIdx, __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     const auto& outputDims = mOp.getOutput(0)->dims();
     return std::accumulate(outputDims.begin(), outputDims.end(),
                         static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
diff --git a/src/operator/ProducerImpl.cpp b/src/operator/ProducerImpl.cpp
index 067bf8f37c18ac97779a4b54290b5f7af48aef59..b2abcdf3facb5c119957b4c3bc9ae33e9e8ddb4a 100644
--- a/src/operator/ProducerImpl.cpp
+++ b/src/operator/ProducerImpl.cpp
@@ -42,7 +42,7 @@ std::size_t Aidge::ProducerImpl_cpu::getNbRequiredProtected(
 
 
 std::size_t Aidge::ProducerImpl_cpu::getRequiredMemory(
-    IOIndex_t outputIdx, const std::vector<DimSize_t> & /*inputsSize*/) const
+    const IOIndex_t outputIdx, __attribute__((unused)) const std::vector<DimSize_t> &inputsSize) const
 {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
diff --git a/src/operator/ReLUImpl.cpp b/src/operator/ReLUImpl.cpp
index dff369006746f62e9054615eef1d604aa232f5ce..90763a918bca3bd80f9b1d5a8b22417bafb91427 100644
--- a/src/operator/ReLUImpl.cpp
+++ b/src/operator/ReLUImpl.cpp
@@ -33,12 +33,12 @@ Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*inputI
                         static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
 }
 
-Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
     return 0;
 }
 
-Aidge::NbElts_t Aidge::ReLUImpl_cpu::getRequiredMemory(Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::ReLUImpl_cpu::getRequiredMemory(__attribute__((unused)) const Aidge::IOIndex_t outputIdx, __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
     return std::accumulate(outputDims.begin(), outputDims.end(),
                         static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
diff --git a/src/operator/SoftmaxImpl.cpp b/src/operator/SoftmaxImpl.cpp
index db91c61ec0ed2c121985e6924b0217e7b9d62be1..860ee7913df44e26e861d228e29831e1b90cdf52 100644
--- a/src/operator/SoftmaxImpl.cpp
+++ b/src/operator/SoftmaxImpl.cpp
@@ -33,12 +33,12 @@ Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*inp
                         static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
 }
 
-Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
+Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
     // for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
     return 0;
 }
 
-Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getRequiredMemory(Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
+Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getRequiredMemory(__attribute__((unused)) const Aidge::IOIndex_t outputIdx, __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
     return std::accumulate(outputDims.begin(), outputDims.end(),
                         static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());