diff --git a/aidge_export_cpp/kernels/convolution.hpp b/aidge_export_cpp/kernels/convolution.hpp
index efc7ee7a192112018d6c582207560d93b4548add..6ea9f0579b84dd5a28a5ea66a778326fcd9c84ce 100644
--- a/aidge_export_cpp/kernels/convolution.hpp
+++ b/aidge_export_cpp/kernels/convolution.hpp
@@ -65,8 +65,8 @@ void convolution_forward(
                 int oOffset = NB_OUTPUTS * oPos;
 
                 // <--
-
-                Bias_T weightedSum = biases[output];
+                // Check if the biases are defined
+                Bias_T weightedSum = biases ? biases[output] : 0;
 
                 for (int sy = 0; sy < KERNEL_HEIGHT; ++sy) {
                     if ((PADDING_Y != 0
@@ -116,4 +116,45 @@ void convolution_forward(
     }
 }
 
+// Overload used when no biases are given to the convolution
+template<int NB_CHANNELS,
+         int CHANNELS_HEIGHT, int CHANNELS_WIDTH,
+         int NB_OUTPUTS,
+         int OUTPUTS_HEIGHT, int OUTPUTS_WIDTH,
+         int PADDING_Y, int PADDING_X,
+         int STRIDE_Y, int STRIDE_X,
+         int DILATION_Y, int DILATION_X,
+         int KERNEL_HEIGHT, int KERNEL_WIDTH,
+         ActivationFunction_T ACTIVATION,
+         typename Input_T, typename Output_T,
+         typename Weight_T,
+         typename Rescaling_T>
+__attribute__((always_inline)) inline
+void convolution_forward(
+    const Input_T* __restrict inputs,
+    Output_T* __restrict outputs,
+    const Weight_T* __restrict weights,
+    std::nullptr_t,  // biases omitted (__restrict is invalid on a non-pointer type)
+    const Rescaling_T& __restrict rescaling)
+{
+    const float* b = nullptr;  // typed null pointer so Bias_T is deduced (as float) below
+
+    convolution_forward<NB_CHANNELS,
+                        CHANNELS_HEIGHT,
+                        CHANNELS_WIDTH,
+                        NB_OUTPUTS,
+                        OUTPUTS_HEIGHT,
+                        OUTPUTS_WIDTH,
+                        PADDING_Y,
+                        PADDING_X,
+                        STRIDE_Y,
+                        STRIDE_X,
+                        DILATION_Y,
+                        DILATION_X,
+                        KERNEL_HEIGHT,
+                        KERNEL_WIDTH,
+                        ACTIVATION>
+                       (inputs, outputs, weights, b, rescaling);
+}
+
 #endif // __AIDGE_EXPORT_CPP_KERNELS_CONVOLUTION__
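
The new overload relies on plain overload resolution rather than template specialization: a literal `nullptr` argument has type `std::nullptr_t`, so template argument deduction against a `const Bias_T*` parameter fails and the `std::nullptr_t` overload is selected instead; it then forwards a typed null pointer so that the `biases ? biases[output] : 0` guard in the generic kernel takes the zero branch. The sketch below shows the same mechanism in isolation; it is only a minimal illustration, and the names (`accumulate_demo`, `N`) are made up for the example rather than taken from aidge_export_cpp.

```cpp
#include <cstddef>
#include <iostream>

// Generic version: accepts a (possibly null) bias pointer, mirroring the
// "biases ? biases[output] : 0" guard added to convolution_forward.
template<int N, typename Input_T, typename Bias_T>
void accumulate_demo(const Input_T* inputs, Input_T* outputs, const Bias_T* biases)
{
    for (int i = 0; i < N; ++i) {
        const Bias_T bias = biases ? biases[i] : 0;  // fall back to 0 when no biases
        outputs[i] = inputs[i] + bias;
    }
}

// Overload picked when the caller passes a literal nullptr: its type is
// std::nullptr_t, so Bias_T cannot be deduced by the generic version above.
template<int N, typename Input_T>
void accumulate_demo(const Input_T* inputs, Input_T* outputs, std::nullptr_t)
{
    const float* b = nullptr;  // pin an arbitrary Bias_T for the forwarded call
    accumulate_demo<N>(inputs, outputs, b);
}

int main()
{
    const float in[3]   = {1.f, 2.f, 3.f};
    const float bias[3] = {10.f, 20.f, 30.f};
    float out[3];

    accumulate_demo<3>(in, out, bias);     // Bias_T deduced as float, biases used
    std::cout << out[2] << '\n';           // prints 33

    accumulate_demo<3>(in, out, nullptr);  // std::nullptr_t overload, no biases
    std::cout << out[2] << '\n';           // prints 3
}
```

Forwarding `const float* b = nullptr` pins the deduced bias type to `float` for the bias-less call; this is harmless because the guarded kernel only dereferences the pointer when it is non-null.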