Unverified Commit dc6653a0 authored by Maxence Naud

[rm] Conv type conversion warnings

parent 10ca7e16
2 merge requests: !71 0.4.0, !61 Fix conv type conversion warnings
Pipeline #78988 passed
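The diff below switches the convolution kernel's size-related template parameters from int to size_t and adds explicit casts where a value can legitimately be negative. As a hedged illustration only, the following stand-alone snippet (none of these names come from the repository) reproduces the kind of implicit-conversion warning this commit removes when such a header is built with -Wconversion/-Wsign-conversion:

    // Hypothetical minimal reproduction of the warning class being removed.
    #include <cstddef>

    template<int STRIDE>                          // old style: signed size parameter
    float load_old(const float* buf, std::size_t pos) {
        // STRIDE is implicitly converted to std::size_t here; with
        // -Wsign-conversion the compiler warns that the sign may change.
        return buf[STRIDE * pos];
    }

    template<std::size_t STRIDE>                  // new style: unsigned size parameter
    float load_new(const float* buf, std::size_t pos) {
        return buf[STRIDE * pos];                 // same-type arithmetic, no warning
    }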
@@ -8,27 +8,27 @@
#include "network/activation_utils.hpp"
- template<int NB_CHANNELS,
- int CHANNELS_HEIGHT, int CHANNELS_WIDTH,
- int NB_OUTPUTS,
- int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
- int PADDING_Y, int PADDING_X,
- int STRIDE_Y, int STRIDE_X,
- int DILATION_Y, int DILATION_X,
- int KERNEL_HEIGHT, int KERNEL_WIDTH,
+ template<size_t NB_CHANNELS,
+ size_t CHANNELS_HEIGHT, size_t CHANNELS_WIDTH,
+ size_t NB_OUTPUTS,
+ size_t OUTPUTS_HEIGHT, size_t OUTPUTS_WIDTH,
+ size_t PADDING_Y, size_t PADDING_X,
+ size_t STRIDE_Y, size_t STRIDE_X,
+ size_t DILATION_Y, size_t DILATION_X,
+ size_t KERNEL_HEIGHT, size_t KERNEL_WIDTH,
ActivationFunction_T ACTIVATION,
// Memory mapping: inputs
- int INPUT_MEM_CONT_OFFSET,
- int INPUT_MEM_CONT_SIZE,
- int INPUT_MEM_WRAP_OFFSET,
- int INPUT_MEM_WRAP_SIZE,
- int INPUT_MEM_STRIDE,
+ size_t INPUT_MEM_CONT_OFFSET,
+ size_t INPUT_MEM_CONT_SIZE,
+ size_t INPUT_MEM_WRAP_OFFSET,
+ size_t INPUT_MEM_WRAP_SIZE,
+ size_t INPUT_MEM_STRIDE,
// Memory mapping: outputs
- int OUTPUT_MEM_CONT_OFFSET,
- int OUTPUT_MEM_CONT_SIZE,
- int OUTPUT_MEM_WRAP_OFFSET,
- int OUTPUT_MEM_WRAP_SIZE,
- int OUTPUT_MEM_STRIDE,
+ size_t OUTPUT_MEM_CONT_OFFSET,
+ size_t OUTPUT_MEM_CONT_SIZE,
+ size_t OUTPUT_MEM_WRAP_OFFSET,
+ size_t OUTPUT_MEM_WRAP_SIZE,
+ size_t OUTPUT_MEM_STRIDE,
typename Input_T, typename Output_T,
typename Weight_T, typename Bias_T,
typename Rescaling_T>
@@ -40,43 +40,43 @@ void convolution_forward(
const Bias_T* __restrict biases,
const Rescaling_T& __restrict rescaling)
{
- constexpr int DILATED_KERNEL_HEIGHT
+ constexpr size_t DILATED_KERNEL_HEIGHT
= KERNEL_HEIGHT + (DILATION_Y - 1) * (KERNEL_HEIGHT - 1);
- constexpr int DILATED_KERNEL_WIDTH
+ constexpr size_t DILATED_KERNEL_WIDTH
= KERNEL_WIDTH + (DILATION_X - 1) * (KERNEL_WIDTH - 1);
- constexpr int OUTPUTS_HEIGHT_NOPAD
+ constexpr size_t OUTPUTS_HEIGHT_NOPAD
= (CHANNELS_HEIGHT - DILATION_Y * (KERNEL_HEIGHT - 1) - 1 + STRIDE_Y) / STRIDE_Y;
- constexpr int OUTPUTS_WIDTH_NOPAD
+ constexpr size_t OUTPUTS_WIDTH_NOPAD
= (CHANNELS_WIDTH - DILATION_X * (KERNEL_WIDTH - 1) - 1 + STRIDE_X) / STRIDE_X;
- for (int oy = 0; oy < OUTPUTS_HEIGHT; ++oy) {
- const int syMin = (PADDING_Y == 0) ? 0
+ for (size_t oy = 0; oy < OUTPUTS_HEIGHT; ++oy) {
+ const size_t syMin = (PADDING_Y == 0) ? 0
: max(PADDING_Y - (oy * STRIDE_Y), 0);
- const int syMax = (PADDING_Y == 0
+ const size_t syMax = (PADDING_Y == 0
&& OUTPUTS_HEIGHT == OUTPUTS_HEIGHT_NOPAD) ? DILATED_KERNEL_HEIGHT
: clamp(CHANNELS_HEIGHT + PADDING_Y - (oy * STRIDE_Y),
0, DILATED_KERNEL_HEIGHT);
- const int iy = (oy * STRIDE_Y) - PADDING_Y;
+ const int iy = static_cast<int>(oy * STRIDE_Y) - static_cast<int>(PADDING_Y);
#ifdef _OPENMP
#pragma omp parallel for collapse(2)
#endif
- for (int ox = 0; ox < OUTPUTS_WIDTH; ++ox) {
- for (int output = 0; output < NB_OUTPUTS; ++output) {
+ for (size_t ox = 0; ox < OUTPUTS_WIDTH; ++ox) {
+ for (size_t output = 0; output < NB_OUTPUTS; ++output) {
// moved to inner loop for collapsing -->
- const int sxMin = (PADDING_X == 0) ? 0
+ const size_t sxMin = (PADDING_X == 0) ? 0
: max(PADDING_X - (ox * STRIDE_X), 0);
- const int sxMax = (PADDING_X == 0
+ const size_t sxMax = (PADDING_X == 0
&& OUTPUTS_WIDTH == OUTPUTS_WIDTH_NOPAD)
? DILATED_KERNEL_WIDTH
: clamp(CHANNELS_WIDTH + PADDING_X - (ox * STRIDE_X),
0, DILATED_KERNEL_WIDTH);
- const int ix = (ox * STRIDE_X) - PADDING_X;
+ const int ix = static_cast<int>(ox * STRIDE_X) - static_cast<int>(PADDING_X);
- const int oPos = (ox + OUTPUTS_WIDTH * oy);
- int oOffset = (OUTPUT_MEM_STRIDE / sizeof(Output_T)) * oPos;
+ const size_t oPos = (ox + OUTPUTS_WIDTH * oy);
+ size_t oOffset = (OUTPUT_MEM_STRIDE / sizeof(Output_T)) * oPos;
if (OUTPUT_MEM_WRAP_SIZE > 0 && oOffset >= (OUTPUT_MEM_CONT_SIZE / sizeof(Output_T))) {
oOffset += (OUTPUT_MEM_WRAP_OFFSET - OUTPUT_MEM_CONT_OFFSET
@@ -87,7 +87,7 @@ void convolution_forward(
// Check if the biases are defined
Bias_T weightedSum = biases ? biases[output] : 0;
- for (int sy = 0; sy < KERNEL_HEIGHT; ++sy) {
+ for (size_t sy = 0; sy < KERNEL_HEIGHT; ++sy) {
if ((PADDING_Y != 0
|| OUTPUTS_HEIGHT != OUTPUTS_HEIGHT_NOPAD)
&& ((sy*DILATION_Y < syMin) || (sy*DILATION_Y >= syMax)))
@@ -95,9 +95,9 @@ void convolution_forward(
continue;
}
- const int iPos = ((sxMin + ix)
- + CHANNELS_WIDTH * (iy + syMin + sy * DILATION_Y));
- int iOffset = (INPUT_MEM_STRIDE / sizeof(Input_T)) * iPos;
+ const size_t iPos = (static_cast<size_t>(sxMin + ix)
+ + CHANNELS_WIDTH * (static_cast<size_t>(iy + syMin) + sy * DILATION_Y));
+ size_t iOffset = (INPUT_MEM_STRIDE / sizeof(Input_T)) * iPos;
// Wrapping cannot occur in the middle of a line, except if
// there is only one line (1D)!
@@ -117,7 +117,7 @@ void convolution_forward(
wrapInRange = true;
}
- const int wOffset = NB_CHANNELS * (sxMin
+ const size_t wOffset = NB_CHANNELS * (sxMin
+ KERNEL_WIDTH * (syMin + sy + KERNEL_HEIGHT * output));
if (!wrapInRange && NB_CHANNELS == (INPUT_MEM_STRIDE / sizeof(Input_T))
@@ -130,7 +130,7 @@ void convolution_forward(
weightedSum);
}
else {
- for (int sx = 0; sx < KERNEL_WIDTH; ++sx) {
+ for (size_t sx = 0; sx < KERNEL_WIDTH; ++sx) {
if ((PADDING_X != 0
|| OUTPUTS_WIDTH != OUTPUTS_WIDTH_NOPAD)
&& ((sx*DILATION_X < sxMin) || (sx*DILATION_X >= sxMax)))
@@ -138,7 +138,7 @@ void convolution_forward(
continue;
}
- int iOffsetInRange = iOffset
+ size_t iOffsetInRange = iOffset
+ sx * DILATION_X * (INPUT_MEM_STRIDE / sizeof(Input_T));
if (wrapInRange
@@ -164,28 +164,28 @@ void convolution_forward(
}
}
- // Template specialization when biases are not given to the convolution
- template<int NB_CHANNELS,
- int CHANNELS_HEIGHT, int CHANNELS_WIDTH,
- int NB_OUTPUTS,
- int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
- int PADDING_Y, int PADDING_X,
- int STRIDE_Y, int STRIDE_X,
- int DILATION_Y, int DILATION_X,
- int KERNEL_HEIGHT, int KERNEL_WIDTH,
+ // Template overloading when biases are not given to the convolution
+ template<size_t NB_CHANNELS,
+ size_t CHANNELS_HEIGHT, size_t CHANNELS_WIDTH,
+ size_t NB_OUTPUTS,
+ size_t OUTPUTS_HEIGHT, size_t OUTPUTS_WIDTH,
+ size_t PADDING_Y, size_t PADDING_X,
+ size_t STRIDE_Y, size_t STRIDE_X,
+ size_t DILATION_Y, size_t DILATION_X,
+ size_t KERNEL_HEIGHT, size_t KERNEL_WIDTH,
ActivationFunction_T ACTIVATION,
// Memory mapping: inputs
- int INPUT_MEM_CONT_OFFSET,
- int INPUT_MEM_CONT_SIZE,
- int INPUT_MEM_WRAP_OFFSET,
- int INPUT_MEM_WRAP_SIZE,
- int INPUT_MEM_STRIDE,
+ size_t INPUT_MEM_CONT_OFFSET,
+ size_t INPUT_MEM_CONT_SIZE,
+ size_t INPUT_MEM_WRAP_OFFSET,
+ size_t INPUT_MEM_WRAP_SIZE,
+ size_t INPUT_MEM_STRIDE,
// Memory mapping: outputs
- int OUTPUT_MEM_CONT_OFFSET,
- int OUTPUT_MEM_CONT_SIZE,
- int OUTPUT_MEM_WRAP_OFFSET,
- int OUTPUT_MEM_WRAP_SIZE,
- int OUTPUT_MEM_STRIDE,
+ size_t OUTPUT_MEM_CONT_OFFSET,
+ size_t OUTPUT_MEM_CONT_SIZE,
+ size_t OUTPUT_MEM_WRAP_OFFSET,
+ size_t OUTPUT_MEM_WRAP_SIZE,
+ size_t OUTPUT_MEM_STRIDE,
typename Input_T, typename Output_T,
typename Weight_T,
typename Rescaling_T>
......
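With the size parameters now unsigned, an expression such as oy * STRIDE_Y - PADDING_Y would wrap around to a huge value whenever the padding exceeds the current position, which is why the diff keeps iy/ix as int and casts explicitly before subtracting. A minimal sketch of that pitfall, using made-up values rather than anything from the repository:

    #include <cstddef>
    #include <cstdio>

    int main() {
        constexpr std::size_t STRIDE_Y  = 1;   // assumed values, for illustration only
        constexpr std::size_t PADDING_Y = 1;
        const std::size_t oy = 0;              // first output row

        // All-unsigned arithmetic wraps: 0 - 1 == SIZE_MAX, not -1.
        const std::size_t wrapped = oy * STRIDE_Y - PADDING_Y;

        // The commit's pattern: cast before subtracting, so the "virtual" input
        // row may legitimately be negative inside the top padding.
        const int iy = static_cast<int>(oy * STRIDE_Y) - static_cast<int>(PADDING_Y);

        std::printf("wrapped = %zu, iy = %d\n", wrapped, iy);
        return 0;
    }

Offsets that actually index memory (oOffset, iOffset, wOffset) become size_t in the new version; only the coordinates that can fall inside the padding keep a signed type.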