diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl_kernels.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl_kernels.hpp
index 0d73cb912a0b8218c29d1f533b674a8ea5005d26..e7bc3a2b845d077877684f75d7980fcd1958eb6e 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl_kernels.hpp
@@ -79,8 +79,8 @@ void AvgPoolingImpl2D_cpu_forward_kernel(const std::array<DimSize_t, 2>& strideD
 #ifdef _OPENMP
 #pragma omp parallel for collapse(2) if (dims[0] * dims[1] > 32)
 #endif
-    for (std::size_t batch = 0; batch < dims[0]; ++batch) {
-        for (std::size_t ch = 0; ch < dims[1]; ++ch) {
+    for (int batch = 0; batch < static_cast<int>(dims[0]); ++batch) {
+        for (int ch = 0; ch < static_cast<int>(dims[1]); ++ch) {
             const std::size_t oIndex = (ch + batch * dims[1]) * oxSize * oySize;
             const std::size_t iIndex = (ch + batch * dims[1]) * dims[2] * dims[3];
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl_kernels.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl_kernels.hpp
index 7bb7971e35916b5ecf9c59ad3fe55965f53eed91..105a33007bae830507128804641b8feb16bc0848 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl_kernels.hpp
@@ -56,8 +56,8 @@ void BatchNormImpl2D_cpu_forward_kernel(float epsilon, float momentum, const std
 #ifdef _OPENMP
 #pragma omp parallel for collapse(2) if (nbBatch * nbChannels > 32)
 #endif
-    for (std::size_t batch = 0; batch < nbBatch; ++batch) {
-        for (std::size_t ch = 0; ch < nbChannels; ++ch) {
+    for (int batch = 0; batch < static_cast<int>(nbBatch); ++batch) {
+        for (int ch = 0; ch < static_cast<int>(nbChannels); ++ch) {
             const std::size_t ioIndex = (ch + batch*nbChannels) * featureMapSize;
             std::fill(output + ioIndex, output + ioIndex + featureMapSize, shift[ch]);
             const P var = std::sqrt(batchVar[ch] + static_cast<P>(epsilon));
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_kernels.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_kernels.hpp
index b16a819b4b42127dddda0659099018a494d06bc9..3019b1d254f4102aabbcfcf4b59b40f10732a18b 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_kernels.hpp
@@ -68,8 +68,8 @@ void ConvDepthWiseImpl1D_cpu_forward_kernel(const std::array<DimSize_t, 1>& stri
 #ifdef _OPENMP
 #pragma omp parallel for collapse(2) if (inputDims[0] * inputDims[1] > 32)
 #endif
-    for (std::size_t batch = 0; batch < inputDims[0]; ++batch) {
-        for (std::size_t ch = 0; ch < inputDims[1]; ++ch) {
+    for (int batch = 0; batch < static_cast<int>(inputDims[0]); ++batch) {
+        for (int ch = 0; ch < static_cast<int>(inputDims[1]); ++ch) {
             const std::size_t oIndex = (ch + batch*inputDims[1]) * oxSize;
             B biasVal = (biases != nullptr) ? biases[ch] : B(0);
             std::fill(output + oIndex, output+(oIndex+oxSize), biasVal);
@@ -158,8 +158,8 @@ void ConvDepthWiseImpl2D_cpu_forward_kernel(const std::array<DimSize_t, 2>& stri
 #ifdef _OPENMP
 #pragma omp parallel for collapse(2) if (inputDims[0] * inputDims[1] > 32)
 #endif
-    for (std::size_t batch = 0; batch < inputDims[0]; ++batch) {
-        for (std::size_t ch = 0; ch < inputDims[1]; ++ch) {
+    for (int batch = 0; batch < static_cast<int>(inputDims[0]); ++batch) {
+        for (int ch = 0; ch < static_cast<int>(inputDims[1]); ++ch) {
             B biasVal = (biases != nullptr) ? biases[ch] : B(0);
             std::size_t oIndex = (ch + batch*inputDims[1]) * outChannels_s;
@@ -201,8 +201,8 @@ void ConvDepthWiseImpl2D_cpu_forward_kernel(const std::array<DimSize_t, 2>& stri
 #ifdef _OPENMP
 #pragma omp parallel for collapse(2) if (inputDims[0] * inputDims[1] > 32)
 #endif
-    for (std::size_t batch = 0; batch < inputDims[0]; ++batch) {
-        for (std::size_t ch = 0; ch < inputDims[1]; ++ch) {
+    for (int batch = 0; batch < static_cast<int>(inputDims[0]); ++batch) {
+        for (int ch = 0; ch < static_cast<int>(inputDims[1]); ++ch) {
             B biasVal = (biases != nullptr) ? biases[ch] : B(0);
             std::size_t oIndex = (ch + batch*inputDims[1]) * outChannels_s;
@@ -226,8 +226,8 @@ void ConvDepthWiseImpl2D_cpu_forward_kernel(const std::array<DimSize_t, 2>& stri
 #ifdef _OPENMP
 #pragma omp parallel for collapse(2) if (inputDims[0] * inputDims[1] > 32)
 #endif
-    for (std::size_t batch = 0; batch < inputDims[0]; ++batch) {
-        for (std::size_t ch = 0; ch < inputDims[1]; ++ch) {
+    for (int batch = 0; batch < static_cast<int>(inputDims[0]); ++batch) {
+        for (int ch = 0; ch < static_cast<int>(inputDims[1]); ++ch) {
             const std::size_t oIndex = (ch + batch*inputDims[1]) * outChannels_s;
             const std::size_t iIndex = (ch + batch*inputDims[1]) * inputDims[2] * inputDims[3];
             const std::size_t wIndex = ch * kernelDims[0] * kernelDims[1];
diff --git a/include/aidge/backend/cpu/operator/ConvImpl_kernels.hpp b/include/aidge/backend/cpu/operator/ConvImpl_kernels.hpp
index b1cd006eae2d0ed9a224b8bdbf5b267472795720..d72761600f261e1222c287e051af98561819011e 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl_kernels.hpp
@@ -62,8 +62,8 @@ void ConvImpl1D_cpu_forward_kernel(const array<DimSize_t, 1> &strideDim,
 #ifdef _OPENMP
 #pragma omp parallel for collapse(2) if (inputDims[0] * outChannels > 32)
 #endif
-    for (std::size_t batch = 0; batch < inputDims[0]; ++batch) {
-        for (std::size_t outCh = 0; outCh < outChannels; ++outCh) {
+    for (int batch = 0; batch < static_cast<int>(inputDims[0]); ++batch) {
+        for (int outCh = 0; outCh < static_cast<int>(outChannels); ++outCh) {
             const std::size_t oIndex = (outCh + batch * outChannels) * oxSize;
             // If bias = nullptr, set B(0)
             B biasVal = (biases != nullptr) ? biases[outCh] : B(0);
@@ -484,8 +484,8 @@ void ConvImpl2D_cpu_forward_kernel(const array<DimSize_t, 2> &strideDims,
 #ifdef _OPENMP
 #pragma omp parallel for collapse(2) if (inputDims[0] * outChannels > 32)
 #endif
-    for (std::size_t batch = 0; batch < inputDims[0]; ++batch) {
-        for (std::size_t outCh = 0; outCh < outChannels; ++outCh) {
+    for (int batch = 0; batch < static_cast<int>(inputDims[0]); ++batch) {
+        for (int outCh = 0; outCh < static_cast<int>(outChannels); ++outCh) {
             std::size_t oIndex = (outCh + batch*inputDims[1]) * outChannels_s;
             // If bias = nullptr, set B(0)
@@ -573,8 +573,8 @@ void ConvImpl2D_cpu_forward_kernel(const array<DimSize_t, 2> &strideDims,
 #ifdef _OPENMP
 #pragma omp parallel for collapse(2) if (inputDims[0] * outChannels > 32)
 #endif
-    for (std::size_t batch = 0; batch < inputDims[0]; ++batch) {
-        for (std::size_t outCh = 0; outCh < outChannels; ++outCh) {
+    for (int batch = 0; batch < static_cast<int>(inputDims[0]); ++batch) {
+        for (int outCh = 0; outCh < static_cast<int>(outChannels); ++outCh) {
             std::size_t oIndex = (outCh + batch*inputDims[1]) * outChannels_s;
             // If bias = nullptr, set B(0)
@@ -609,8 +609,8 @@ void ConvImpl2D_cpu_forward_kernel(const array<DimSize_t, 2> &strideDims,
 #ifdef _OPENMP
 #pragma omp parallel for collapse(2) if (inputDims[0] * outChannels > 32)
 #endif
-    for (std::size_t batch = 0; batch < inputDims[0]; ++batch) {
-        for (std::size_t outCh = 0; outCh < outChannels; ++outCh) {
+    for (int batch = 0; batch < static_cast<int>(inputDims[0]); ++batch) {
+        for (int outCh = 0; outCh < static_cast<int>(outChannels); ++outCh) {
             std::size_t oIndex = (outCh + batch*inputDims[1]) * outChannels_s;
             // If bias = nullptr, set B(0)
diff --git a/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl_kernels.hpp b/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl_kernels.hpp
index 3915adb3a0fcfd8cef0bf78761b2169272e1c211..8ff1ad08b0897dbc3d89d67632a555effba73a85 100644
--- a/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/GlobalAveragePoolingImpl_kernels.hpp
@@ -77,8 +77,8 @@ void GlobalAveragePoolingImpl_cpu_forward_kernel(const std::shared_ptr<Tensor>&
 #ifdef _OPENMP
 #pragma omp parallel for collapse(2) if (dims[0] * dims[1] > 32)
 #endif
-    for (DimSize_t batch = 0; batch < dims[0]; ++batch) {
-        for (DimSize_t channel = 0; channel < dims[1]; ++channel) {
+    for (int batch = 0; batch < static_cast<int>(dims[0]); ++batch) {
+        for (int channel = 0; channel < static_cast<int>(dims[1]); ++channel) {
             const I *filter_start = std::next(
                 input, (batch * in_batch_nb_elems) + (channel * in_channel_nb_elems));
             output[batch * out_batch_nb_elems + channel] = castFromFloat<O>(stableMean<I>(filter_start, in_channel_nb_elems));
diff --git a/include/aidge/backend/cpu/operator/MaxPoolingImpl_kernels.hpp b/include/aidge/backend/cpu/operator/MaxPoolingImpl_kernels.hpp
index 9772b0abd2dfd0e5a6b2e11a856f734592e478db..b5f219f9086f387f86769a582c2a2cd6aaa42d9f 100644
--- a/include/aidge/backend/cpu/operator/MaxPoolingImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/MaxPoolingImpl_kernels.hpp
@@ -69,8 +69,8 @@ void MaxPoolingImpl2D_cpu_forward_kernel(
 #ifdef _OPENMP
 #pragma omp parallel for collapse(2) if (dims[0] * dims[1] > 32)
 #endif
-    for (std::size_t batch = 0; batch < dims[0]; ++batch){
-        for (std::size_t channel = 0; channel < dims[1]; ++channel){
+    for (int batch = 0; batch < static_cast<int>(dims[0]); ++batch){
+        for (int channel = 0; channel < static_cast<int>(dims[1]); ++channel){
             auto batchChannelIndex = (channel + batch * dims[1]);
             const std::size_t outputBaseIndex = batchChannelIndex * outXSize * outYSize;
             const std::size_t inputBaseIndex = batchChannelIndex * dims[2] * dims[3];
diff --git a/include/aidge/backend/cpu/operator/SoftmaxImpl_kernels.hpp b/include/aidge/backend/cpu/operator/SoftmaxImpl_kernels.hpp
index e74f3518bf0f394d17d89542b2a2221047beb0af..ab6790e257b04a2fb1ee3d3ed57c5c7220c6c456 100644
--- a/include/aidge/backend/cpu/operator/SoftmaxImpl_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/SoftmaxImpl_kernels.hpp
@@ -40,8 +40,8 @@ void SoftmaxImpl_cpu_forward_kernel(std::size_t axisIdx, const std::vector<DimSi
 #ifdef _OPENMP
 #pragma omp parallel for collapse(2) if (preAxisElems * postAxisElems > 32)
 #endif
-    for (std::size_t i = 0; i < preAxisElems; ++i) {
-        for (std::size_t j = 0; j < postAxisElems; ++j) {
+    for (int i = 0; i < static_cast<int>(preAxisElems); ++i) {
+        for (int j = 0; j < static_cast<int>(postAxisElems); ++j) {
             I maxVal = input[i * inputDims[axisIdx] * postAxisElems + j];
             for (std::size_t k = 1; k < inputDims[axisIdx]; ++k) {
                 std::size_t inIdx = i * inputDims[axisIdx] * postAxisElems + k * postAxisElems + j;
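The one change repeated across every hunk is replacing std::size_t induction variables with int and casting the loop bounds. A plausible reason (not stated in the patch itself) is that OpenMP levels before 3.0 only accept signed integral loop variables in the canonical form required by "parallel for", and some toolchains still reject unsigned indices there. The standalone sketch below is not part of the patch and uses made-up extents; it simply shows the same pattern in isolation and builds with or without OpenMP enabled.

    // Standalone sketch (not from the Aidge sources): the signed-index pattern used above.
    #include <array>
    #include <cstddef>
    #include <cstdio>

    int main() {
        // Hypothetical extents standing in for inputDims[0] (batch) and inputDims[1] (channels).
        const std::array<std::size_t, 2> dims{4, 16};

    #ifdef _OPENMP
    // Pre-3.0 OpenMP requires signed integral loop variables, hence int indices with cast bounds.
    #pragma omp parallel for collapse(2) if (dims[0] * dims[1] > 32)
    #endif
        for (int batch = 0; batch < static_cast<int>(dims[0]); ++batch) {
            for (int ch = 0; ch < static_cast<int>(dims[1]); ++ch) {
                // Flat (batch, channel) offset, still computed in std::size_t as in the kernels.
                const std::size_t offset = ch + batch * dims[1];
                std::printf("batch=%d ch=%d offset=%zu\n", batch, ch, offset);
            }
        }
        return 0;
    }

Compiled with OpenMP (e.g. -fopenmp) the two loops are collapsed into a single parallel iteration space; without it the pragma is dropped and the loops run serially, which matches how the guarded pragmas behave in the kernels above.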