Skip to content
Snippets Groups Projects
Commit 68ae2715 authored by Maxence Naud's avatar Maxence Naud
Browse files

[Fix] Remove unused-variable warnings and change some variables to const

parent 88f8f32c
No related branches found
No related tags found
No related merge requests found
Showing
with 96 additions and 96 deletions
......@@ -66,7 +66,7 @@ class AddImpl_cpu : public OperatorImpl {
}
public:
NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final {
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Requires the whole tensors
......@@ -74,12 +74,12 @@ class AddImpl_cpu : public OperatorImpl {
return std::accumulate(inputDims.begin(), inputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
}
NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final {
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final {
// for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
return 0;
}
NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final {
NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final {
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
......@@ -87,12 +87,12 @@ class AddImpl_cpu : public OperatorImpl {
return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
}
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final {
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final {
assert(inputIdx < mNbConsumedData.size());
return mNbConsumedData[inputIdx];
}
NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final {
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final {
assert(outputIdx < mNbProducedData.size());
return mNbProducedData[outputIdx];
}
......@@ -119,16 +119,16 @@ class AddImpl_cpu<1> : public OperatorImpl {
}
public:
NbElts_t getNbRequiredData(IOIndex_t /*inputIdx*/) const override final;
NbElts_t getNbRequiredData(const IOIndex_t /*inputIdx*/) const override final;
NbElts_t getNbRequiredProtected(IOIndex_t /*inputIdx*/) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
NbElts_t getRequiredMemory(IOIndex_t /*outputIdx*/,
const std::vector<DimSize_t>& /*inputsSize*/) const override final;
NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx,
__attribute__((unused)) const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(IOIndex_t /*inputIdx*/) const override final;
NbElts_t getNbConsumedData(const IOIndex_t /*inputIdx*/) const override final;
NbElts_t getNbProducedData(IOIndex_t /*outputIdx*/) const override final;
NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
void forward();
......@@ -150,16 +150,16 @@ class AddImpl_cpu<2> : public OperatorImpl {
}
public:
NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(IOIndex_t /*outputIdx*/,
const std::vector<DimSize_t>& /*inputsSize*/) const override final;
NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx,
__attribute__((unused)) const std::vector<DimSize_t>& inputsSize) const override final;
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(IOIndex_t /*outputIdx*/) const override final;
NbElts_t getNbProducedData(const IOIndex_t /*outputIdx*/) const override final;
void forward();
......@@ -181,15 +181,15 @@ class AddImpl_cpu<3> : public OperatorImpl {
}
public:
NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(IOIndex_t /*inputIdx*/) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t /*inputIdx*/) const override final;
NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& /*inputsSize*/) const override final;
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void forward();
......
......@@ -49,11 +49,11 @@ class AvgPoolingImpl2D_cpu : public OperatorImpl {
}
public:
NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void forward();
......
......@@ -64,11 +64,11 @@ class BatchNormImpl2D_cpu : public OperatorImpl {
}
public:
NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void forward();
......
......@@ -51,11 +51,11 @@ class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
}
public:
NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void forward();
......
......@@ -51,11 +51,11 @@ class ConvImpl2D_cpu : public OperatorImpl {
}
public:
NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void forward();
......
......@@ -45,11 +45,11 @@ class FCImpl_cpu : public OperatorImpl {
static std::unique_ptr<FCImpl_cpu> create(const FC_Op &op) { return std::make_unique<FCImpl_cpu>(op); }
public:
NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void forward();
......
......@@ -44,11 +44,11 @@ class LeakyReLUImpl_cpu : public OperatorImpl {
}
public:
NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void forward();
......
......@@ -32,11 +32,11 @@ class ProducerImpl_cpu : public OperatorImpl {
}
public:
NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void forward();
......
......@@ -44,11 +44,11 @@ class ReLUImpl_cpu : public OperatorImpl {
}
public:
NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void forward();
......
......@@ -44,11 +44,11 @@ class SoftmaxImpl_cpu : public OperatorImpl {
}
public:
NbElts_t getNbRequiredData(IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
NbElts_t getNbConsumedData(IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(IOIndex_t outputIdx) const override final;
NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
void forward();
......
......@@ -31,12 +31,12 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbRequiredData(Aidge::IOIndex_t /*inpu
return static_cast<int>(std::static_pointer_cast<Tensor>(mOp.getInput(0))->size());
}
Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
// for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
return 0;
}
Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getRequiredMemory(Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
Aidge::NbElts_t Aidge::AddImpl_cpu<1>::getRequiredMemory(const Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
// Requires the whole tensors, regardless of available data on inputs
return std::static_pointer_cast<Tensor>(mOp.getOutput(0))->size();
}
......@@ -80,7 +80,7 @@ void Aidge::AddImpl_cpu<1>::backward() {
//////////////////////////////////
Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Requires the whole tensors
......@@ -90,12 +90,12 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredData(Aidge::IOIndex_t inputI
NbElts_t(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
// for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
return 0;
}
Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getRequiredMemory(Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
Aidge::NbElts_t Aidge::AddImpl_cpu<2>::getRequiredMemory(const Aidge::IOIndex_t outputIdx, __attribute__((unused)) const std::vector<Aidge::DimSize_t>& inputsSize) const {
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
......@@ -147,7 +147,7 @@ void Aidge::AddImpl_cpu<2>::backward() {
//////////////////////////////////
Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Requires the whole tensors
......@@ -157,12 +157,12 @@ Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredData(Aidge::IOIndex_t inputI
Aidge::NbElts_t(1), std::multiplies<Aidge::NbElts_t>());
}
Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
// for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
return 0;
}
Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getRequiredMemory(Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
Aidge::NbElts_t Aidge::AddImpl_cpu<3>::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
......
......@@ -20,7 +20,7 @@
#include "operator/AvgPooling.hpp"
#include "utils/Types.h"
Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Requires the whole tensors
......@@ -35,7 +35,7 @@ Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*
return 0;
}
Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getRequiredMemory(Aidge::IOIndex_t outputIdx,
Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
......
......@@ -20,7 +20,7 @@
#include "operator/BatchNorm.hpp"
#include "utils/Types.h"
Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Requires the whole tensors
......@@ -35,8 +35,8 @@ Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*i
return 0;
}
Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getRequiredMemory(Aidge::IOIndex_t outputIdx,
const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
const std::vector<Aidge::DimSize_t> &inputsSize) const {
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
......
......@@ -21,7 +21,7 @@
#include "operator/ConvDepthWise.hpp"
#include "utils/Types.h"
Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Requires the whole tensors
......@@ -36,8 +36,8 @@ Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredProtected(IOIndex_t
return 0;
}
Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getRequiredMemory(Aidge::IOIndex_t outputIdx,
const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
const std::vector<Aidge::DimSize_t> &inputsSize) const {
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
......
......@@ -21,7 +21,7 @@
#include "operator/Conv.hpp"
#include "utils/Types.h"
Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx) const {
Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
assert(mOp.getInput(inputIdx) && "requires valid input");
// Requires the whole tensors
......@@ -36,8 +36,8 @@ Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputI
return 0;
}
Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getRequiredMemory(Aidge::IOIndex_t outputIdx,
const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
const std::vector<Aidge::DimSize_t> &inputsSize) const {
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
......
......@@ -20,7 +20,7 @@
#include "operator/FCImpl_forward_kernels.hpp"
#include "utils/Types.h"
Aidge::NbElts_t Aidge::FCImpl_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx) const
Aidge::NbElts_t Aidge::FCImpl_cpu::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const
{
assert(mOp.getInput(inputIdx) && "requires valid input");
......@@ -36,7 +36,7 @@ Aidge::NbElts_t Aidge::FCImpl_cpu::getNbRequiredData(Aidge::IOIndex_t inputIdx)
}
Aidge::NbElts_t
Aidge::FCImpl_cpu::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const
Aidge::FCImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const
{
// for the direct convolution algorithm, convolutions can be in-place, if
// there is no padding!
......@@ -44,7 +44,7 @@ Aidge::NbElts_t
}
Aidge::NbElts_t Aidge::FCImpl_cpu::getRequiredMemory(
IOIndex_t outputIdx, const std::vector<DimSize_t> & /*inputsSize*/) const
const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const
{
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
......
......@@ -33,12 +33,12 @@ Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*i
static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
// for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
return 0;
}
Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getRequiredMemory(Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t> &inputsSize) const {
const auto& outputDims = mOp.getOutput(0)->dims();
return std::accumulate(outputDims.begin(), outputDims.end(),
static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
......
......@@ -42,7 +42,7 @@ std::size_t Aidge::ProducerImpl_cpu::getNbRequiredProtected(
std::size_t Aidge::ProducerImpl_cpu::getRequiredMemory(
IOIndex_t outputIdx, const std::vector<DimSize_t> & /*inputsSize*/) const
const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const
{
// Requires the whole tensors, regardless of available data on inputs
assert(outputIdx == 0 && "operator has only one output");
......
......@@ -33,12 +33,12 @@ Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*inputI
static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
// for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
return 0;
}
Aidge::NbElts_t Aidge::ReLUImpl_cpu::getRequiredMemory(Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
Aidge::NbElts_t Aidge::ReLUImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t> &inputsSize) const {
const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
return std::accumulate(outputDims.begin(), outputDims.end(),
static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
......
......@@ -33,12 +33,12 @@ Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredData(Aidge::IOIndex_t /*inp
static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
}
Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredProtected(Aidge::IOIndex_t /*inputIdx*/) const {
Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex_t /*inputIdx*/) const {
// for the direct convolution algorithm, convolutions can be in-place, if there is no padding!
return 0;
}
Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getRequiredMemory(Aidge::IOIndex_t /*outputIdx*/, const std::vector<Aidge::DimSize_t>& /*inputsSize*/) const {
Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t> &inputsSize) const {
const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
return std::accumulate(outputDims.begin(), outputDims.end(),
static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment