Skip to content
Snippets Groups Projects
Commit 43b467f6 authored by Maxence Naud's avatar Maxence Naud
Browse files

Remove warning in release compile mode

parent 4d09c2a9
No related branches found
No related tags found
1 merge request!105version 0.2.0
Pipeline #43321 passed
......@@ -331,6 +331,8 @@ class Tensor : public Data,
return div_.getOutput(0)->clone();
}
~Tensor() noexcept;
public:
/**
* @brief Perform a deep copy of the tensor.
......
......@@ -16,27 +16,11 @@
#include <memory>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
namespace Aidge {
inline void calculateFanInFanOut(std::shared_ptr<Tensor> tensor,
std::uint32_t& fanIn, std::uint32_t& fanOut) {
AIDGE_ASSERT(
tensor->nbDims() == 4,
"Tensor need to have 4 dimensions to compute FanIn and FanOut.");
// Warning: This function suppose NCXX data layout.
// Aidge currently only support NCHW but this maybe not be true in the
// future.
DimSize_t batchSize = tensor->dims()[0];
DimSize_t channelSize = tensor->dims()[1];
AIDGE_ASSERT(batchSize != 0,
"Cannot calculate FanIn if tensor batch size is 0.");
AIDGE_ASSERT(channelSize != 0,
"Cannot calculate FanOut if tensor channel size is 0.");
fanIn = static_cast<std::uint32_t>(tensor->size() / batchSize);
fanOut = static_cast<std::uint32_t>(tensor->size() / channelSize);
}
void calculateFanInFanOut(std::shared_ptr<Tensor> tensor,
std::uint32_t& fanIn, std::uint32_t& fanOut);
enum class VarianceNorm { FanIn, Average, FanOut };
......
......@@ -193,7 +193,11 @@ public:
typedef std::map<std::shared_ptr<Node>, std::vector<MemoryPlane>,
CompByNodeName> MemMap_T;
public:
MemoryManager(): mClock(0) {}
~MemoryManager() noexcept;
public:
/// Generates a new MemorySpace
std::shared_ptr<MemorySpace> reserve(unsigned int size,
const std::set<std::shared_ptr<Node> >&
......
......@@ -53,13 +53,16 @@ protected:
std::chrono::time_point<std::chrono::high_resolution_clock> start_,
std::chrono::time_point<std::chrono::high_resolution_clock> end_)
: node(node_), start(start_), end(end_) {}
~SchedulingElement() noexcept = default;
std::shared_ptr<Node> node;
std::chrono::time_point<std::chrono::high_resolution_clock> start;
std::chrono::time_point<std::chrono::high_resolution_clock> end;
};
public:
struct PriorProducersConsumers {
PriorProducersConsumers();
PriorProducersConsumers(const PriorProducersConsumers&);
~PriorProducersConsumers() noexcept;
bool isPrior = false;
std::set<std::shared_ptr<Aidge::Node>> requiredProducers;
std::set<std::shared_ptr<Aidge::Node>> priorConsumers;
......@@ -73,7 +76,7 @@ public:
// ctor
};
virtual ~Scheduler() noexcept = default;
virtual ~Scheduler() noexcept;
public:
/**
......
......@@ -27,6 +27,17 @@ namespace Aidge {
*/
#define AIDGE_LOG_CONTEXT(...) const Log::Context logContext_##__LINE__(__VA_ARGS__)
/**
 * @brief Swallow any number of arguments without using them.
 *
 * Used to mark variadic arguments as deliberately unused (e.g. when a
 * logging call compiles to nothing in release mode), silencing
 * unused-parameter/unused-variable warnings.
 *
 * Improvements over the former recursive two-overload version:
 *  - no per-argument pass-by-value copies (forwarding references bind to
 *    anything without copying);
 *  - accepts zero arguments as well;
 *  - no recursive template instantiation chain.
 *
 * @param pargs Arguments to discard; never read, copied, or moved.
 */
template<class... Us>
static void discard_args(Us&&... /*pargs*/) {
    // Intentionally empty: naming no parameter is itself the suppression.
}
/**
* Aidge logging class, for displaying and file logging of events.
*/
......@@ -54,7 +65,7 @@ public:
};
/**
* Detailed messages for debugging purposes, providing information helpful
* Detailed messages for debugging purposes, providing information helpful
* for developers to trace and identify issues.
* Detailed insights of what is happening in an operation, not useful for the
* end-user. The operation is performed nominally.
......@@ -66,11 +77,13 @@ public:
#ifndef NDEBUG
// only when compiled in Debug
log(Debug, fmt::format(std::forward<Args>(args)...));
#else
discard_args(&args...);
#endif
}
/**
* Messages that provide a record of the normal operation, about
* Messages that provide a record of the normal operation, about
* the application's state, progress, or important events.
* Reports normal start, end and key steps in an operation. The operation is
* performed nominally.
......@@ -103,7 +116,7 @@ public:
}
/**
* Signifies a problem or unexpected condition that the application can
* Signifies a problem or unexpected condition that the application can
* recover from, but attention is needed to prevent further issues.
* The operation could not be performed, but it does not prevent potential
* further operations.
......
......@@ -39,6 +39,10 @@ Aidge::Tensor& Aidge::Tensor::operator=(const Aidge::Tensor& other) {
return *this;
}
// Out-of-line defaulted destructor, matching the `~Tensor() noexcept;`
// declaration in the header. Anchoring the definition in this translation
// unit was introduced by the "Remove warning in release compile mode"
// change — NOTE(review): presumably to silence a release-build warning
// and/or pin the vtable/special members here; confirm which warning.
Aidge::Tensor::~Tensor() noexcept = default;
void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
// TODO: scalar Tensor not handled
if (dims.empty()) { // scalar
......
/********************************************************************************
* Copyright (c) 2023 CEA-List
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* SPDX-License-Identifier: EPL-2.0
*
********************************************************************************/
#include "aidge/filler/Filler.hpp"
#include <cstdint> // std::uint32_t
#include <memory>
#include <string>
#include <vector>
#include "aidge/data/Tensor.hpp"
#include "aidge/utils/ErrorHandling.hpp"
#include "aidge/utils/Types.h"
/**
 * @brief Compute the fan-in and fan-out of a 4-dimensional tensor.
 *
 * Fan-in is the number of elements per batch entry (size / dims[0]);
 * fan-out is the number of elements per channel entry (size / dims[1]).
 *
 * @param tensor Tensor to inspect; must be non-null and have 4 dimensions.
 * @param fanIn  Out-parameter receiving the computed fan-in.
 * @param fanOut Out-parameter receiving the computed fan-out.
 */
void Aidge::calculateFanInFanOut(std::shared_ptr<Aidge::Tensor> tensor,
                                 std::uint32_t& fanIn, std::uint32_t& fanOut) {
    // Robustness: fail with a clear message instead of dereferencing null.
    AIDGE_ASSERT(tensor != nullptr,
                 "Cannot compute FanIn and FanOut of a null tensor.");
    AIDGE_ASSERT(
        tensor->nbDims() == 4,
        "Tensor need to have 4 dimensions to compute FanIn and FanOut.");
    // Warning: this assumes an NCXX data layout. Aidge currently only
    // supports NCHW, but that may not stay true in the future.
    const DimSize_t batchSize   = tensor->dims()[0];
    const DimSize_t channelSize = tensor->dims()[1];
    AIDGE_ASSERT(batchSize != 0,
                 "Cannot calculate FanIn if tensor batch size is 0.");
    AIDGE_ASSERT(channelSize != 0,
                 "Cannot calculate FanOut if tensor channel size is 0.");
    // Hoist the element count: one call instead of two.
    const auto nbElems = tensor->size();
    fanIn  = static_cast<std::uint32_t>(nbElems / batchSize);
    fanOut = static_cast<std::uint32_t>(nbElems / channelSize);
}
......@@ -50,9 +50,9 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
const std::shared_ptr<BatchNorm_Op<2>> batchOp =
std::static_pointer_cast<BatchNorm_Op<2>>(batchnormNode->getOperator());
DimSize_t convNbOutChannels;
DimSize_t channelsSize;
std::array<DimSize_t, 2> kernelDims;
DimSize_t convNbOutChannels = 1;
DimSize_t channelsSize = 1;
std::array<DimSize_t, 2> kernelDims = {1,1};
AIDGE_ASSERT(convNode->getOperator()->operatorType() == OperatorType::Tensor, "Operator must be of Tensor type.");
std::shared_ptr<OperatorTensor> convOp = std::static_pointer_cast<OperatorTensor>(convNode->getOperator());
if (convNode->type() == Conv_Op<2>::Type) {
......@@ -66,7 +66,6 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
const std::shared_ptr<ConvDepthWise_Op<2>> convOpPtr =
std::static_pointer_cast<ConvDepthWise_Op<2>>(convNode->getOperator());
convNbOutChannels = convOpPtr->getAttr<DimSize_t>("Channels");
channelsSize = 1;
kernelDims = convOpPtr->getAttr<std::array<DimSize_t, 2>>("KernelDims");
}
......
......@@ -14,6 +14,8 @@
#include "aidge/scheduler/MemoryManager.hpp"
#include "aidge/utils/ErrorHandling.hpp"
// Out-of-line defaulted destructor for the `~MemoryManager() noexcept;`
// declared in the header; part of the "Remove warning in release compile
// mode" change — NOTE(review): presumably anchors the definition in this
// translation unit to silence a release-build warning; confirm which one.
Aidge::MemoryManager::~MemoryManager() noexcept = default;
std::shared_ptr<Aidge::MemoryManager::MemorySpace> Aidge::MemoryManager::reserve(
unsigned int size,
const std::set<std::shared_ptr<Node> >& dependencies)
......
......@@ -35,6 +35,12 @@
#include "aidge/operator/Producer.hpp"
#include "aidge/utils/Types.h"
// Out-of-line defaulted special members for Scheduler and its nested
// PriorProducersConsumers struct, matching the declarations added in the
// header by this commit ("Remove warning in release compile mode").
// NOTE(review): defining them here rather than in-class presumably avoids
// a release-build warning and keeps the definitions in one TU — confirm.
Aidge::Scheduler::~Scheduler() noexcept = default;

Aidge::Scheduler::PriorProducersConsumers::PriorProducersConsumers() = default;
Aidge::Scheduler::PriorProducersConsumers::PriorProducersConsumers(const PriorProducersConsumers&) = default;
Aidge::Scheduler::PriorProducersConsumers::~PriorProducersConsumers() noexcept = default;
void Aidge::Scheduler::generateScheduling() {
auto schedule = generateBaseScheduling();
generateEarlyLateScheduling(schedule);
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment