From 60c74485405002dd75c5dec12394a72f20463e0f Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Fri, 1 Sep 2023 09:59:47 +0200
Subject: [PATCH] Fixed compile errors

Cast the tensor dimensions to signedsize before adding the padding
offset in the AvgPooling, ConvDepthWise and Conv forward kernels, so
that the "< 0" test in the sxMax/syMax computation runs in signed
arithmetic instead of being an always-false unsigned comparison.
Also restore the outputIdx parameter name in the getRequiredMemory()
overloads, since the assert references it, and cast it to void so
that NDEBUG builds (where the assert expands to nothing) do not warn
about an unused parameter.

---
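For reference, a minimal standalone sketch of the two patterns this
patch applies; clampUpper and requiredMemory are illustrative names
only, not part of the Aidge API, and signedsize is assumed here to be
a signed integer type wide enough to hold a dimension:

    #include <cassert>
    #include <cstddef>

    // Stand-in for the kernels' signedsize alias (assumption).
    using signedsize = std::ptrdiff_t;

    // Mirrors the sxMax/syMax fix: without the cast, dim + dif is
    // evaluated in unsigned arithmetic, so the "< 0" test can never be
    // true and the compiler flags the comparison.
    std::size_t clampUpper(std::size_t dim, signedsize dif, std::size_t kernel) {
        return (static_cast<signedsize>(dim) + dif) < 0
                   ? 0
                   : ((dim + dif) > kernel ? kernel : dim + dif);
    }

    // Mirrors the getRequiredMemory() fix: the assert needs the named
    // parameter, and the (void) cast keeps -Wunused-parameter quiet in
    // NDEBUG builds where the assert compiles away.
    std::size_t requiredMemory(std::size_t outputIdx) {
        assert(outputIdx == 0 && "operator has only one output");
        (void) outputIdx;
        return 1;
    }
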
 include/aidge/operator/AvgPoolingImpl_forward_kernels.hpp    | 4 ++--
 include/aidge/operator/ConvDepthWiseImpl_forward_kernels.hpp | 4 ++--
 include/aidge/operator/ConvImpl_forward_kernels.hpp          | 4 ++--
 src/operator/AvgPoolingImpl.cpp                              | 1 +
 src/operator/ConvDepthWiseImpl.cpp                           | 3 ++-
 src/operator/ConvImpl.cpp                                    | 3 ++-
 src/operator/FCImpl.cpp                                      | 3 ++-
 src/operator/ProducerImpl.cpp                                | 3 ++-
 8 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/include/aidge/operator/AvgPoolingImpl_forward_kernels.hpp b/include/aidge/operator/AvgPoolingImpl_forward_kernels.hpp
index 7ff3243d..7ead482c 100644
--- a/include/aidge/operator/AvgPoolingImpl_forward_kernels.hpp
+++ b/include/aidge/operator/AvgPoolingImpl_forward_kernels.hpp
@@ -63,11 +63,11 @@ void AvgPoolingImpl2D_cpu_forward_kernel(const AvgPooling_Op<2>::Parameters &par
             for (std::size_t ox = 0; ox < oxSize; ++ox) {
                 const signedsize difx = static_cast<signedsize>(std::get<2>(params)[0] - ox * std::get<0>(params)[0]);
                 const std::size_t sxMin = static_cast<std::size_t>(std::max(difx, signedsize(0)));
-                const std::size_t sxMax = (dims[2] + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<1>(params)[0] ? std::get<1>(params)[0] : dims[2] + difx);
+                const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<1>(params)[0] ? std::get<1>(params)[0] : dims[2] + difx);
                 for (std::size_t oy = 0; oy < oySize; ++oy) {
                     const signedsize dify = static_cast<signedsize>(std::get<2>(params)[1] - oy * std::get<0>(params)[1]);
                     const std::size_t syMin = static_cast<std::size_t>(std::max(dify, signedsize(0)));
-                    const std::size_t syMax = (dims[3] + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<1>(params)[1] ? std::get<1>(params)[1] : dims[3] + dify);
+                    const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<1>(params)[1] ? std::get<1>(params)[1] : dims[3] + dify);
                     const std::size_t oIndexFull = oIndex + ox*oySize + oy;
                     const std::size_t ix = ox * std::get<0>(params)[0];
                     const std::size_t iy = oy * std::get<0>(params)[1];
diff --git a/include/aidge/operator/ConvDepthWiseImpl_forward_kernels.hpp b/include/aidge/operator/ConvDepthWiseImpl_forward_kernels.hpp
index 6a7ec3fe..da9c8daf 100644
--- a/include/aidge/operator/ConvDepthWiseImpl_forward_kernels.hpp
+++ b/include/aidge/operator/ConvDepthWiseImpl_forward_kernels.hpp
@@ -69,11 +69,11 @@ void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Parameter
             for (std::size_t ox = 0; ox < oxSize; ++ox) {
                 const signedsize difx = static_cast<signedsize>(std::get<4>(params)[0] - ox * std::get<0>(params)[0]);
                 const std::size_t sxMin = static_cast<std::size_t>(std::max(difx, signedsize(0)));
-                const std::size_t sxMax = (dims[2] + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<3>(params)[0] ? std::get<3>(params)[0] : dims[2] + difx);
+                const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<3>(params)[0] ? std::get<3>(params)[0] : dims[2] + difx);
                 for (std::size_t oy = 0; oy < oySize; ++oy) {
                     const signedsize dify = static_cast<signedsize>(std::get<4>(params)[1] - oy * std::get<0>(params)[1]);
                     const std::size_t syMin = static_cast<std::size_t>(std::max(dify, signedsize(0)));
-                    const std::size_t syMax = (dims[3] + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<3>(params)[1] ? std::get<3>(params)[1] : dims[3] + dify);
+                    const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<3>(params)[1] ? std::get<3>(params)[1] : dims[3] + dify);
                     const std::size_t oIndexFull = oIndex + ox*oySize + oy;
                     const signedsize ix = static_cast<signedsize>(ox * std::get<0>(params)[0]) - std::get<4>(params)[0];
                     const signedsize iy = static_cast<signedsize>(oy * std::get<0>(params)[1]) - std::get<4>(params)[1];
diff --git a/include/aidge/operator/ConvImpl_forward_kernels.hpp b/include/aidge/operator/ConvImpl_forward_kernels.hpp
index 459b5c81..93cf523b 100644
--- a/include/aidge/operator/ConvImpl_forward_kernels.hpp
+++ b/include/aidge/operator/ConvImpl_forward_kernels.hpp
@@ -112,11 +112,11 @@ void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Parameters &params, const s
                 for (std::size_t ox = 0; ox < oxSize; ++ox) {
                     const signedsize difx = static_cast<signedsize>(std::get<5>(params)[0] - ox * std::get<0>(params)[0]);
                     const std::size_t sxMin = static_cast<std::size_t>(std::max(difx, signedsize(0)));
-                    const std::size_t sxMax = (dims[2] + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<4>(params)[0] ? std::get<4>(params)[0] : dims[2] + difx);
+                    const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<4>(params)[0] ? std::get<4>(params)[0] : dims[2] + difx);
                     for (std::size_t oy = 0; oy < oySize; ++oy) {
                         const signedsize dify = static_cast<signedsize>(std::get<5>(params)[1] - oy * std::get<0>(params)[1]);
                         const std::size_t syMin = static_cast<std::size_t>(std::max(dify, signedsize(0)));
-                        const std::size_t syMax = (dims[3] + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<4>(params)[1] ? std::get<4>(params)[1] : dims[3] + dify);
+                        const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<4>(params)[1] ? std::get<4>(params)[1] : dims[3] + dify);
                         const std::size_t oIndexFull = oIndex + ox*oySize + oy;
                         const signedsize ix = static_cast<signedsize>(ox * std::get<0>(params)[0]) - std::get<5>(params)[0];
                         const signedsize iy = static_cast<signedsize>(oy * std::get<0>(params)[1]) - std::get<5>(params)[1];
diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp
index 2e1e901d..eebaa5dd 100644
--- a/src/operator/AvgPoolingImpl.cpp
+++ b/src/operator/AvgPoolingImpl.cpp
@@ -39,6 +39,7 @@ Aidge::NbElts_t Aidge::AvgPoolingImpl2D_cpu::getRequiredMemory(const Aidge::IOIn
                                                            const std::vector<Aidge::DimSize_t> & /*inputsSize*/) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
+    (void) outputIdx;
 
     const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
     return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
diff --git a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp
index ce5063dd..75a35cff 100644
--- a/src/operator/ConvDepthWiseImpl.cpp
+++ b/src/operator/ConvDepthWiseImpl.cpp
@@ -36,10 +36,11 @@ Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getNbRequiredProtected(IOIndex_t
     return 0;
 }
 
-Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t /*outputIdx*/,
+Aidge::NbElts_t Aidge::ConvDepthWiseImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                            const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
+    (void) outputIdx;
 
     const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
     return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index a1b3e2ca..e75fab10 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -36,10 +36,11 @@ Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getNbRequiredProtected(IOIndex_t /*inputI
     return 0;
 }
 
-Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t /*outputIdx*/,
+Aidge::NbElts_t Aidge::ConvImpl2D_cpu::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                          const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
+    (void) outputIdx;
 
     const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
     return std::accumulate(outputDims.begin(), outputDims.end(), NbElts_t(1), std::multiplies<NbElts_t>());
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index ca4eebcf..970ce690 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -44,10 +44,11 @@ Aidge::NbElts_t
 }
 
 Aidge::NbElts_t Aidge::FCImpl_cpu::getRequiredMemory(
-    const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const
+    const IOIndex_t outputIdx, const std::vector<DimSize_t> &/*inputsSize*/) const
 {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
+    (void) outputIdx;
 
     const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
     return std::accumulate(
diff --git a/src/operator/ProducerImpl.cpp b/src/operator/ProducerImpl.cpp
index b7f1538d..6c1de523 100644
--- a/src/operator/ProducerImpl.cpp
+++ b/src/operator/ProducerImpl.cpp
@@ -42,10 +42,11 @@ std::size_t Aidge::ProducerImpl_cpu::getNbRequiredProtected(
 
 
 std::size_t Aidge::ProducerImpl_cpu::getRequiredMemory(
-    const IOIndex_t /*outputIdx*/, const std::vector<DimSize_t> &/*inputsSize*/) const
+    const IOIndex_t outputIdx, const std::vector<DimSize_t> &/*inputsSize*/) const
 {
     // Requires the whole tensors, regardless of available data on inputs
     assert(outputIdx == 0 && "operator has only one output");
+    (void) outputIdx;
 
     const auto &outputDims = std::static_pointer_cast<Tensor>(mOp.getOutput(0))->dims();
     return std::accumulate(
-- 
GitLab