Commit 230796c3 authored by Maxence Naud

[Upd] Remove warnings about unused parameters. [Add] Usage comments on some includes.

parent 67192853
Showing changes with 41 additions and 42 deletions
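The hunks below repeatedly apply the same pattern: parameters that a CPU implementation does not read are annotated with the GCC/Clang extension __attribute__((unused)) so that -Wunused-parameter stays quiet. A minimal, self-contained sketch of that pattern follows; the names and signatures are simplified stand-ins, not the actual Aidge declarations, and the [[maybe_unused]] variant is only a portable alternative, not something this commit uses.

// Sketch of the warning-suppression pattern used throughout this commit.
// The identifiers below are illustrative, not copied from the Aidge headers.
#include <cstddef>
#include <vector>

// GCC/Clang extension: marks the parameter as intentionally unused,
// silencing -Wunused-parameter.
std::size_t getRequiredMemory(__attribute__((unused)) const std::size_t outputIdx,
                              __attribute__((unused)) const std::vector<std::size_t>& inputsSize) {
    return 0;
}

// Portable alternatives (not used by this commit): C++17 [[maybe_unused]],
// or simply leaving the parameter unnamed.
std::size_t getRequiredMemoryAlt([[maybe_unused]] std::size_t outputIdx,
                                 const std::vector<std::size_t>& /*inputsSize*/) {
    return 0;
}

int main() {
    return static_cast<int>(getRequiredMemory(0, {}) + getRequiredMemoryAlt(0, {}));
}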
@@ -53,7 +53,7 @@ class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
 public:
     NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, __attribute__((unused)) const std::vector<DimSize_t> &inputsSize) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
@@ -53,7 +53,7 @@ class ConvImpl2D_cpu : public OperatorImpl {
 public:
     NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, __attribute__((unused)) const std::vector<DimSize_t> &inputsSize) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
@@ -47,7 +47,7 @@ class FCImpl_cpu : public OperatorImpl {
 public:
     NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, __attribute__((unused)) const std::vector<DimSize_t> &inputsSize) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
@@ -46,7 +46,7 @@ class LeakyReLUImpl_cpu : public OperatorImpl {
 public:
     NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, __attribute__((unused)) const std::vector<DimSize_t>& inputsSize) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
@@ -34,7 +34,7 @@ class ProducerImpl_cpu : public OperatorImpl {
 public:
     NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, __attribute__((unused)) const std::vector<DimSize_t> &inputsSize) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
@@ -46,7 +46,7 @@ class ReLUImpl_cpu : public OperatorImpl {
 public:
     NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, __attribute__((unused)) const std::vector<DimSize_t>& inputsSize) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
@@ -46,7 +46,7 @@ class SoftmaxImpl_cpu : public OperatorImpl {
 public:
     NbElts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbRequiredProtected(const IOIndex_t inputIdx) const override final;
-    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, const std::vector<DimSize_t>& inputsSize) const override final;
+    NbElts_t getRequiredMemory(__attribute__((unused)) const IOIndex_t outputIdx, __attribute__((unused)) const std::vector<DimSize_t>& inputsSize) const override final;
     NbElts_t getNbConsumedData(const IOIndex_t inputIdx) const override final;
     NbElts_t getNbProducedData(const IOIndex_t outputIdx) const override final;
@@ -10,9 +10,9 @@
  ********************************************************************************/
 #include <cassert>
-#include <numeric>
-#include <chrono>
-#include <thread>
+#include <chrono> // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread> // std::this_thread::sleep_for
 #include <vector>
 #include "aidge/operator/Conv.hpp"
@@ -12,8 +12,7 @@
 #include "aidge/operator/BatchNormImpl.hpp"
 #include <cassert>
-#include <numeric>
-#include <thread>
+#include <numeric> // std::accumulate
 #include <vector>
 #include "aidge/operator/BatchNormImpl_forward_kernels.hpp"
@@ -35,8 +34,8 @@ Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*i
     return 0;
 }
-Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                                                        const std::vector<Aidge::DimSize_t> &inputsSize) const {
+Aidge::NbElts_t Aidge::BatchNormImpl2D_cpu::getRequiredMemory(__attribute__((unused)) const Aidge::IOIndex_t outputIdx,
+                                                        __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
@@ -12,9 +12,9 @@
 #include "aidge/operator/ConvDepthWiseImpl.hpp"
 #include <cassert>
-#include <chrono>
-#include <numeric>
-#include <thread>
+#include <chrono> // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread> // std::this_thread::sleep_for
 #include <vector>
 #include "aidge/operator/ConvDepthWiseImpl_forward_kernels.hpp"
@@ -36,8 +36,8 @@ Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredProtected(IOIndex_t
     return 0;
 }
-Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                                                        const std::vector<Aidge::DimSize_t> &inputsSize) const {
+Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getRequiredMemory(__attribute__((unused)) const Aidge::IOIndex_t outputIdx,
+                                                        __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
@@ -12,9 +12,9 @@
 #include "aidge/operator/ConvImpl.hpp"
 #include <cassert>
-#include <chrono>
-#include <numeric>
-#include <thread>
+#include <chrono> // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread> // std::this_thread::sleep_for
 #include <vector>
 #include "aidge/operator/ConvImpl_forward_kernels.hpp"
@@ -36,8 +36,8 @@ Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputI
     return 0;
 }
-Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
-                                                        const std::vector<Aidge::DimSize_t> &inputsSize) const {
+Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getRequiredMemory(__attribute__((unused)) const Aidge::IOIndex_t outputIdx,
+                                                        __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
@@ -10,9 +10,9 @@
  ********************************************************************************/
 #include <cassert>
-#include <chrono>
-#include <numeric>
-#include <thread>
+#include <chrono> // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread> // std::this_thread::sleep_for
 #include <vector>
 #include "aidge/operator/FC.hpp"
@@ -44,7 +44,7 @@ Aidge::NbElts_t
 }
 Aidge::NbElts_t Aidge::FCImpl_cpu::getRequiredMemory(
-    const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const
+    __attribute__((unused)) const IOIndex_t outputIdx, __attribute__((unused)) const std::vector<DimSize_t> &inputsSize) const
 {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
@@ -10,9 +10,9 @@
  ********************************************************************************/
 #include <cassert>
-#include <numeric>
-#include <chrono>
-#include <thread>
+#include <chrono> // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread> // std::this_thread::sleep_for
 #include "aidge/operator/LeakyReLU.hpp"
@@ -38,7 +38,7 @@ Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getNbRequiredProtected(const Aidge::IO
     return 0;
 }
-Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t> &inputsSize) const {
+Aidge::NbElts_t Aidge::LeakyReLUImpl_cpu::getRequiredMemory(__attribute__((unused)) const Aidge::IOIndex_t outputIdx, __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     const auto& outputDims = mOp.getOutput(0)->dims();
     return std::accumulate(outputDims.begin(), outputDims.end(),
                         static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
@@ -10,7 +10,7 @@
  ********************************************************************************/
 #include <cassert>
-#include <numeric>
+#include <numeric> // std::accumulate
 #include <vector>
 #include "aidge/data/Tensor.hpp"
@@ -42,7 +42,7 @@ std::size_t Aidge::ProducerImpl_cpu::getNbRequiredProtected(
 std::size_t Aidge::ProducerImpl_cpu::getRequiredMemory(
-    const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const
+    __attribute__((unused)) const IOIndex_t outputIdx, __attribute__((unused)) const std::vector<DimSize_t> &inputsSize) const
 {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
@@ -10,9 +10,9 @@
  ********************************************************************************/
 #include <cassert>
-#include <numeric>
-#include <chrono>
-#include <thread>
+#include <chrono> // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread> // std::this_thread::sleep_for
 #include "aidge/operator/ReLU.hpp"
@@ -38,7 +38,7 @@ Aidge::NbElts_t Aidge::ReLUImpl_cpu::getNbRequiredProtected(const Aidge::IOIndex
     return 0;
 }
-Aidge::NbElts_t Aidge::ReLUImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t> &inputsSize) const {
+Aidge::NbElts_t Aidge::ReLUImpl_cpu::getRequiredMemory(__attribute__((unused)) const Aidge::IOIndex_t outputIdx, __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
     return std::accumulate(outputDims.begin(), outputDims.end(),
                         static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
@@ -10,9 +10,9 @@
  ********************************************************************************/
 #include <cassert>
-#include <numeric>
-#include <chrono>
-#include <thread>
+#include <chrono> // std::chrono::milliseconds
+#include <numeric> // std::accumulate
+#include <thread> // std::this_thread::sleep_for
 #include "aidge/operator/Softmax.hpp"
@@ -38,7 +38,7 @@ Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getNbRequiredProtected(const Aidge::IOIn
     return 0;
 }
-Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx, const std::vector<Aidge::DimSize_t> &inputsSize) const {
+Aidge::NbElts_t Aidge::SoftmaxImpl_cpu::getRequiredMemory(__attribute__((unused)) const Aidge::IOIndex_t outputIdx, __attribute__((unused)) const std::vector<Aidge::DimSize_t> &inputsSize) const {
     const auto& outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
     return std::accumulate(outputDims.begin(), outputDims.end(),
                         static_cast<NbElts_t>(1), std::multiplies<NbElts_t>());
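The getRequiredMemory implementations touched above all size their output by folding the output tensor's dimensions with std::accumulate, as annotated on the <numeric> includes. A small standalone illustration of that computation follows; the dimension values are arbitrary and chosen only for the example.

// Element-count computation in the style of the getRequiredMemory bodies above.
#include <cassert>
#include <cstddef>
#include <functional> // std::multiplies
#include <numeric>    // std::accumulate
#include <vector>

int main() {
    const std::vector<std::size_t> outputDims{2, 3, 4, 4}; // arbitrary example dims
    // Fold the dimensions into a single element count: 2 * 3 * 4 * 4 = 96.
    const std::size_t nbElts = std::accumulate(outputDims.begin(), outputDims.end(),
                                               static_cast<std::size_t>(1),
                                               std::multiplies<std::size_t>());
    assert(nbElts == 96);
    return 0;
}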