diff --git a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp index 490598599aedf24b26865ce6a1ddb3fe32044b1b..221e36dcfac44e21d1b1a35674ca21403b4b57ab 100644 --- a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp @@ -20,7 +20,7 @@ namespace Aidge { template <class I1, class O> void AddImpl1I_cpu_forward_kernel(const std::size_t inputLength, const void* input1_, void* output_) { - // FIXME: missing Add parameters as arguments + // FIXME: missing Add attributes as arguments const I1* input1 = static_cast<const I1*>(input1_); O* output = static_cast<O*>(output_); @@ -32,7 +32,7 @@ void AddImpl1I_cpu_forward_kernel(const std::size_t inputLength, const void* inp template <class I1, class I2, class O> void AddImpl2I_cpu_forward_kernel(const std::size_t inputLength, const void* input1_, const void* input2_, void* output_) { - // FIXME: missing Add parameters as arguments + // FIXME: missing Add attributes as arguments const I1* input1 = static_cast<const I1*>(input1_); const I2* input2 = static_cast<const I2*>(input2_); O* output = static_cast<O*>(output_); @@ -45,7 +45,7 @@ void AddImpl2I_cpu_forward_kernel(const std::size_t inputLength, const void* inp template <class I1, class I2, class I3, class O> void AddImpl3I_cpu_forward_kernel(const std::size_t inputLength, const void* input1_, const void* input2_, const void* input3_, void* output_) { - // FIXME: missing Add parameters as arguments + // FIXME: missing Add attributes as arguments const I1* input1 = static_cast<const I1*>(input1_); const I2* input2 = static_cast<const I2*>(input2_); const I3* input3 = static_cast<const I3*>(input3_); diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp index 8373cb84a550efd8741a2dbc04c1e94ad37fe611..cfbcadfe6b719369618955a14c4cde5733ef6773 100644 --- 
a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp +++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp @@ -29,11 +29,11 @@ namespace Aidge { class AvgPoolingImpl2DForward_cpu : public Registrable<AvgPoolingImpl2DForward_cpu, std::tuple<DataType, DataType>, - void(const AvgPooling_Op<2>::Parameters &, const std::array<DimSize_t, 4> &, const void *, void *)> {}; + void(const AvgPooling_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, void *)> {}; class AvgPoolingImpl2DBackward_cpu : public Registrable<AvgPoolingImpl2DBackward_cpu, std::tuple<DataType, DataType>, - void(const AvgPooling_Op<2>::Parameters &, const std::array<DimSize_t, 4> &, const void *, void *)> {}; + void(const AvgPooling_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, void *)> {}; class AvgPoolingImpl2D_cpu : public OperatorImpl { private: diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp index 776e020f1a20056db345c8e845fd73bb31b4138b..60b4923bdc18674da52be9bd07d9947fb9790f0d 100644 --- a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp @@ -26,51 +26,51 @@ namespace Aidge { * @brief Forward kernel for 2D AvgPoolingolution on CPU backend. * @tparam I Input data type. * @tparam O Output data type. - * @param params tuple of Parameters from the Operator + * @param attrs tuple of Attributes from the Operator * @param dims Array of input dimensions. * @param input_ const input Tensor. * @param output_ Output Tensor.
*/ template <class I, class O> -void AvgPoolingImpl2D_cpu_forward_kernel(const AvgPooling_Op<2>::Parameters &params, +void AvgPoolingImpl2D_cpu_forward_kernel(const AvgPooling_Op<2>::Attrs &attrs, const std::array<DimSize_t, 4> &dims, const void *input_, void *output_) { - // FIXME: missing convolution parameters as arguments + // FIXME: missing convolution attributes as arguments const I *input = static_cast<const I *>(input_); O *output = static_cast<O *>(output_); // output H size const std::size_t oxSize = - static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] + std::get<2>(params)[0] + std::get<2>(params)[2] - std::get<1>(params)[0] + std::get<0>(params)[0]) / - static_cast<float>(std::get<0>(params)[0]))); + static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] + std::get<2>(attrs)[0] + std::get<2>(attrs)[2] - std::get<1>(attrs)[0] + std::get<0>(attrs)[0]) / + static_cast<float>(std::get<0>(attrs)[0]))); // output W size const std::size_t oySize = - static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] + std::get<2>(params)[1] + std::get<2>(params)[3] - std::get<1>(params)[1] + std::get<0>(params)[1]) / - static_cast<float>(std::get<0>(params)[1]))); + static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] + std::get<2>(attrs)[1] + std::get<2>(attrs)[3] - std::get<1>(attrs)[1] + std::get<0>(attrs)[1]) / + static_cast<float>(std::get<0>(attrs)[1]))); // TODO: kernel computation // output (batch, outCh, Xout, Yout) // input (batch, ch, Xin, Yin) // weight (outCh, ch, kernelX, kernelY) - // does not take Dilation parameter into account + // does not take Dilation attribute into account using signedsize = std::make_signed<std::size_t>::type; for (std::size_t batch = 0; batch < dims[0]; ++batch) { for (std::size_t ch = 0; ch < dims[1]; ++ch) { const std::size_t oIndex = (ch + batch*dims[1]) * oxSize * oySize; const std::size_t iIndex = (ch + batch*dims[1]) * dims[2] * dims[3]; for (std::size_t ox = 0; ox < oxSize; ++ox) { - const
signedsize difx = static_cast<signedsize>(std::get<2>(params)[0] - ox * std::get<0>(params)[0]); + const signedsize difx = static_cast<signedsize>(std::get<2>(attrs)[0] - ox * std::get<0>(attrs)[0]); const std::size_t sxMin = static_cast<std::size_t>(std::max(difx, signedsize(0))); - const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<1>(params)[0] ? std::get<1>(params)[0] : dims[2] + difx); + const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<1>(attrs)[0] ? std::get<1>(attrs)[0] : dims[2] + difx); for (std::size_t oy = 0; oy < oySize; ++oy) { - const signedsize dify = static_cast<signedsize>(std::get<2>(params)[1] - oy * std::get<0>(params)[1]); + const signedsize dify = static_cast<signedsize>(std::get<2>(attrs)[1] - oy * std::get<0>(attrs)[1]); const std::size_t syMin = static_cast<std::size_t>(std::max(dify, signedsize(0))); - const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<1>(params)[1] ? std::get<1>(params)[1] : dims[3] + dify); + const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<1>(attrs)[1] ? 
std::get<1>(attrs)[1] : dims[3] + dify); const std::size_t oIndexFull = oIndex + ox*oySize + oy; - const std::size_t ix = ox * std::get<0>(params)[0]; - const std::size_t iy = oy * std::get<0>(params)[1]; + const std::size_t ix = ox * std::get<0>(attrs)[0]; + const std::size_t iy = oy * std::get<0>(attrs)[1]; if (sxMin == 0 && syMin == 0 && sxMax == 3 && syMax == 3) { output[oIndexFull] += static_cast<O>( diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp index d9f25b4a8e38510f82fc5afe9ed4b656197a47d5..30557f6cbba05829b3cc9e17364ae4d933a568cf 100644 --- a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp +++ b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp @@ -29,7 +29,7 @@ namespace Aidge { class BatchNormImpl2DForward_cpu : public Registrable<BatchNormImpl2DForward_cpu, std::tuple<DataType, DataType, DataType>, - void(const BatchNorm_Op<2>::Parameters &, + void(const BatchNorm_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, const void *, @@ -41,7 +41,7 @@ class BatchNormImpl2DForward_cpu class BatchNormImpl2DBackward_cpu : public Registrable<BatchNormImpl2DBackward_cpu, std::tuple<DataType, DataType, DataType>, - void(const BatchNorm_Op<2>::Parameters &, + void(const BatchNorm_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, const void *, diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp index eedb80bde60d65b53bac70cc33ca83eb4f0121e7..486829e782ae2173332a7efa6646bb7bba322252 100644 --- a/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp @@ -27,7 +27,7 @@ namespace Aidge { * @tparam W Weight data type. * @tparam B Bias data type. * @tparam O Output data type. 
- * @param params tuple of Parameters from the Operator + * @param attrs tuple of Attributes from the Operator * @param dims Array of input dimensions. * @param input_ const input Tensor. * @param scale_ const scale Tensor. @@ -37,9 +37,9 @@ namespace Aidge { * @param output_ Output Tensor. */ template <class I, class P, class O> -void BatchNormImpl2D_cpu_forward_kernel(const BatchNorm_Op<2>::Parameters &params, const std::array<DimSize_t, 4> &dims, +void BatchNormImpl2D_cpu_forward_kernel(const BatchNorm_Op<2>::Attrs &attrs, const std::array<DimSize_t, 4> &dims, const void *input_, const void *scale_, const void *shift_, void *batchMean_, void *batchVar_, void *output_, const bool freeze) { - // FIXME: missing convolution parameters as arguments + // FIXME: missing convolution attributes as arguments const I *input = static_cast<const I *>(input_); const P *scale = static_cast<const P *>(scale_); const P *shift = static_cast<const P *>(shift_); @@ -52,12 +52,12 @@ void BatchNormImpl2D_cpu_forward_kernel(const BatchNorm_Op<2>::Parameters &param const DimSize_t featureMapSize = dims[2]*dims[3]; - if ((freeze == true) || (std::get<1>(params) == 0.0f)) { + if ((freeze == true) || (std::get<1>(attrs) == 0.0f)) { for (std::size_t batch = 0; batch < nbBatch; ++batch) { for (std::size_t ch = 0; ch < nbChannels; ++ch) { const std::size_t ioIndex = (ch + batch*nbChannels) * featureMapSize; std::fill(output + ioIndex, output + ioIndex + featureMapSize, shift[ch]); - const P var = std::sqrt(batchVar[ch] + static_cast<P>(std::get<0>(params))); + const P var = std::sqrt(batchVar[ch] + static_cast<P>(std::get<0>(attrs))); for (std::size_t feature = 0; feature<featureMapSize; ++feature) { output[ioIndex + feature] += scale[ch] * (input[ioIndex + feature]-batchMean[ch]) / var; @@ -81,10 +81,10 @@ void BatchNormImpl2D_cpu_forward_kernel(const BatchNorm_Op<2>::Parameters &param const I inputMean = sum / static_cast<I>(nbDataPerChannel); const I inputVar = sumSquare /
static_cast<I>(nbDataPerChannel) - inputMean*inputMean; - batchMean[ch] = batchMean[ch]*(1-std::get<1>(params)) + inputMean*std::get<1>(params); - batchVar[ch] = batchVar[ch]*(1-std::get<1>(params)) + inputVar*(static_cast<I>(nbDataPerChannel)/static_cast<I>(nbDataPerChannel-1))*std::get<1>(params); + batchMean[ch] = batchMean[ch]*(1-std::get<1>(attrs)) + inputMean*std::get<1>(attrs); + batchVar[ch] = batchVar[ch]*(1-std::get<1>(attrs)) + inputVar*(static_cast<I>(nbDataPerChannel)/static_cast<I>(nbDataPerChannel-1))*std::get<1>(attrs); - const P var = std::sqrt(inputVar + static_cast<P>(std::get<0>(params))); + const P var = std::sqrt(inputVar + static_cast<P>(std::get<0>(attrs))); for (std::size_t batch = 0; batch < nbBatch; ++batch) { const std::size_t ioIndex = (ch + batch*nbChannels) * featureMapSize; for (std::size_t feature = 0; feature<featureMapSize; ++feature) { diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp index 0d21c676d797b2fc4e95c4aea47674c8fca5eef4..2826b635590c5d19f34c8e4beee20fc8dba2183b 100644 --- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp +++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp @@ -29,12 +29,12 @@ namespace Aidge { class ConvDepthWiseImpl2DForward_cpu : public Registrable<ConvDepthWiseImpl2DForward_cpu, std::tuple<DataType, DataType, DataType, DataType>, - void(const ConvDepthWise_Op<2>::Parameters &, const std::array<DimSize_t, 4> &, const void *, + void(const ConvDepthWise_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, const void *, const void *, void *)> {}; class ConvDepthWiseImpl2DBackward_cpu : public Registrable<ConvDepthWiseImpl2DBackward_cpu, std::tuple<DataType, DataType, DataType, DataType>, - void(const ConvDepthWise_Op<2>::Parameters &, const std::array<DimSize_t, 4> &, const void *, + void(const ConvDepthWise_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, const void *, const void 
*, void *)> {}; class ConvDepthWiseImpl2D_cpu : public OperatorImpl { diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp index ee2d82e00376c5a2cc5a075565e35eb8885c021e..669bdbc898528b0f96a59dd3c6f8e438ae1291e4 100644 --- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp @@ -27,7 +27,7 @@ namespace Aidge { * @tparam W Weight data type. * @tparam B Bias data type. * @tparam O Output data type. - * @param params tuple of Parameters from the Operator + * @param attrs tuple of Attributes from the Operator * @param dims Array of input dimensions. * @param input_ const input Tensor. * @param weights_ const weight Tensor. @@ -35,9 +35,9 @@ namespace Aidge { * @param output_ Output Tensor. */ template <class I, class W, class B, class O> -void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Parameters &params, const std::array<DimSize_t, 4> &dims, +void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Attrs &attrs, const std::array<DimSize_t, 4> &dims, const void *input_, const void *weights_, const void *biases_, void *output_) { - // FIXME: missing convolution parameters as arguments + // FIXME: missing convolution attributes as arguments const I *input = static_cast<const I *>(input_); const W *weights = static_cast<const W *>(weights_); const B *biases = static_cast<const B *>(biases_); @@ -46,52 +46,52 @@ void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Parameter // output H size const std::size_t oxSize = - static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] + std::get<4>(params)[0] + std::get<4>(params)[2] - std::get<3>(params)[0] + std::get<0>(params)[0]) / - static_cast<float>(std::get<0>(params)[0]))); + static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] + std::get<4>(attrs)[0]
+ std::get<4>(attrs)[2] - std::get<3>(attrs)[0] + std::get<0>(attrs)[0]) / + static_cast<float>(std::get<0>(attrs)[0]))); // output W size const std::size_t oySize = - static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] + std::get<4>(params)[1] + std::get<4>(params)[3] - std::get<3>(params)[1] + std::get<0>(params)[1]) / - static_cast<float>(std::get<0>(params)[1]))); + static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] + std::get<4>(attrs)[1] + std::get<4>(attrs)[3] - std::get<3>(attrs)[1] + std::get<0>(attrs)[1]) / + static_cast<float>(std::get<0>(attrs)[1]))); // TODO: kernel computation // output (batch, outCh, Xout, Yout) // input (batch, ch, Xin, Yin) // weight (outCh, ch, kernelX, kernelY) - // does not take Dilation parameter into account + // does not take Dilation attribute into account using signedsize = std::make_signed<std::size_t>::type; for (std::size_t batch = 0; batch < dims[0]; ++batch) { - for (std::size_t ch = 0; ch < std::get<2>(params); ++ch) { - const std::size_t oIndex = (ch + batch*std::get<2>(params)) * oxSize * oySize; + for (std::size_t ch = 0; ch < std::get<2>(attrs); ++ch) { + const std::size_t oIndex = (ch + batch*std::get<2>(attrs)) * oxSize * oySize; B biasVal = (biases != nullptr) ? biases[ch] : B(0); std::fill(output + oIndex, output+(oIndex+oxSize*oySize), biasVal); const std::size_t iIndex = (ch + batch*dims[1]) * dims[2] * dims[3]; - const std::size_t wIndex = ch * std::get<3>(params)[0] * std::get<3>(params)[1]; + const std::size_t wIndex = ch * std::get<3>(attrs)[0] * std::get<3>(attrs)[1]; for (std::size_t ox = 0; ox < oxSize; ++ox) { - const signedsize difx = static_cast<signedsize>(std::get<4>(params)[0] - ox * std::get<0>(params)[0]); + const signedsize difx = static_cast<signedsize>(std::get<4>(attrs)[0] - ox * std::get<0>(attrs)[0]); const std::size_t sxMin = static_cast<std::size_t>(std::max(difx, signedsize(0))); - const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 
0 : ((dims[2] + difx) > std::get<3>(params)[0] ? std::get<3>(params)[0] : dims[2] + difx); + const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<3>(attrs)[0] ? std::get<3>(attrs)[0] : dims[2] + difx); for (std::size_t oy = 0; oy < oySize; ++oy) { - const signedsize dify = static_cast<signedsize>(std::get<4>(params)[1] - oy * std::get<0>(params)[1]); + const signedsize dify = static_cast<signedsize>(std::get<4>(attrs)[1] - oy * std::get<0>(attrs)[1]); const std::size_t syMin = static_cast<std::size_t>(std::max(dify, signedsize(0))); - const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<3>(params)[1] ? std::get<3>(params)[1] : dims[3] + dify); + const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<3>(attrs)[1] ? std::get<3>(attrs)[1] : dims[3] + dify); const std::size_t oIndexFull = oIndex + ox*oySize + oy; - const signedsize ix = static_cast<signedsize>(ox * std::get<0>(params)[0]) - std::get<4>(params)[0]; - const signedsize iy = static_cast<signedsize>(oy * std::get<0>(params)[1]) - std::get<4>(params)[1]; + const signedsize ix = static_cast<signedsize>(ox * std::get<0>(attrs)[0]) - std::get<4>(attrs)[0]; + const signedsize iy = static_cast<signedsize>(oy * std::get<0>(attrs)[1]) - std::get<4>(attrs)[1]; if (sxMin == 0 && syMin == 0 && sxMax == 3 && syMax == 3) { - output[oIndexFull] += (weights[wIndex + 0*std::get<3>(params)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+0)] + - weights[wIndex + 0*std::get<3>(params)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+1)] + - weights[wIndex + 0*std::get<3>(params)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+2)] + - weights[wIndex + 1*std::get<3>(params)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] 
+ static_cast<std::size_t>(iy+0)] + - weights[wIndex + 1*std::get<3>(params)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+1)] + - weights[wIndex + 1*std::get<3>(params)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+2)] + - weights[wIndex + 2*std::get<3>(params)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+0)] + - weights[wIndex + 2*std::get<3>(params)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+1)] + - weights[wIndex + 2*std::get<3>(params)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+2)]); + output[oIndexFull] += (weights[wIndex + 0*std::get<3>(attrs)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+0)] + + weights[wIndex + 0*std::get<3>(attrs)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+1)] + + weights[wIndex + 0*std::get<3>(attrs)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+2)] + + weights[wIndex + 1*std::get<3>(attrs)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+0)] + + weights[wIndex + 1*std::get<3>(attrs)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+1)] + + weights[wIndex + 1*std::get<3>(attrs)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+2)] + + weights[wIndex + 2*std::get<3>(attrs)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+0)] + + weights[wIndex + 2*std::get<3>(attrs)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+1)] + + weights[wIndex + 2*std::get<3>(attrs)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + 
static_cast<std::size_t>(iy+2)]); } else { for (std::size_t sx = sxMin; sx < sxMax; ++sx) { for (std::size_t sy = syMin; sy < syMax; ++sy) { - output[oIndexFull] += weights[wIndex + sx*std::get<3>(params)[1] + sy] * + output[oIndexFull] += weights[wIndex + sx*std::get<3>(attrs)[1] + sy] * input[iIndex + static_cast<std::size_t>(ix+static_cast<signedsize>(sx))*dims[3] + static_cast<std::size_t>(iy+static_cast<signedsize>(sy))]; } } diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp index 1f3dffe43b966bc37887f267cc56760a899476f9..b9411fe0f1ac079d9857cc8f2178fc98fadc3a77 100644 --- a/include/aidge/backend/cpu/operator/ConvImpl.hpp +++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp @@ -29,12 +29,12 @@ namespace Aidge { class ConvImpl2DForward_cpu : public Registrable<ConvImpl2DForward_cpu, std::tuple<DataType, DataType, DataType, DataType>, - void(const Conv_Op<2>::Parameters &, const std::array<DimSize_t, 4> &, const void *, + void(const Conv_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, const void *, const void *, void *)> {}; class ConvImpl2DBackward_cpu : public Registrable<ConvImpl2DBackward_cpu, std::tuple<DataType, DataType, DataType, DataType>, - void(const Conv_Op<2>::Parameters &, const std::array<DimSize_t, 4> &, const void *, + void(const Conv_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, const void *, const void *, void *)> {}; class ConvImpl2D_cpu : public OperatorImpl { diff --git a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp index bc2f10099f42cba91be8d089b66dc176fdeb7c10..9d4d6dfdfcc114e47e478089c4d5a42c2bee0f28 100644 --- a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp @@ -27,7 +27,7 @@ namespace Aidge { * @tparam W Weight data type. * @tparam B Bias data type. 
* @tparam O Output data type. - * @param params tuple of Parameters from the Operator + * @param attrs tuple of Attributes from the Operator * @param dims Array of input dimensions. * @param input_ const input Tensor. * @param weights_ const weight Tensor. @@ -35,9 +35,9 @@ namespace Aidge { * @param output_ Output Tensor. */ template <class I, class W, class B, class O> -void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Parameters &params, const std::array<DimSize_t, 4> &dims, +void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Attrs &attrs, const std::array<DimSize_t, 4> &dims, const void *input_, const void *weights_, const void *biases_, void *output_) { - // FIXME: missing convolution parameters as arguments + // FIXME: missing convolution attributes as arguments const I *input = static_cast<const I *>(input_); const W *weights = static_cast<const W *>(weights_); const B *biases = static_cast<const B *>(biases_); @@ -45,34 +45,34 @@ void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Parameters &params, const s /* // output H size const std::size_t oxSize = - static_cast<std::size_t>(static_cast<float>(dims[0] - std::get<4>(params)[0] + std::get<0>(params)[0]) / - static_cast<float>(std::get<0>(params)[0])); + static_cast<std::size_t>(static_cast<float>(dims[0] - std::get<4>(attrs)[0] + std::get<0>(attrs)[0]) / + static_cast<float>(std::get<0>(attrs)[0])); // output W size const std::size_t oySize = - static_cast<std::size_t>(static_cast<float>(dims[1] - std::get<4>(params)[1] + std::get<0>(params)[1]) / - static_cast<float>(std::get<0>(params)[1])); + static_cast<std::size_t>(static_cast<float>(dims[1] - std::get<4>(attrs)[1] + std::get<0>(attrs)[1]) / + static_cast<float>(std::get<0>(attrs)[1])); // TODO: kernel computation // output (Xout, Yout, outCh, batch) // input (Xin, Yin, inCh, batch) // weight (kernelX, kernelY, inCh, outCh) - // does not take Dilation parameter into account + // does not take Dilation attribute into account for (std::size_t ox = 0;
ox < oxSize; ++ox) { for (std::size_t oy = 0; oy < oySize; ++oy) { - const std::size_t ix = ox * std::get<0>(params)[0]; - const std::size_t iy = oy * std::get<0>(params)[1]; + const std::size_t ix = ox * std::get<0>(attrs)[0]; + const std::size_t iy = oy * std::get<0>(attrs)[1]; - for (std::size_t outCh = 0; outCh < std::get<3>(params); ++outCh) { - const std::size_t oIndex = dims[3] * (outCh + std::get<3>(params) * (oy + oySize * ox)); + for (std::size_t outCh = 0; outCh < std::get<3>(attrs); ++outCh) { + const std::size_t oIndex = dims[3] * (outCh + std::get<3>(attrs) * (oy + oySize * ox)); B biasVal = (biases != nullptr) ? biases[outCh] : B(0); for (std::size_t batch = 0; batch < dims[3]; ++batch) { output[oIndex + batch] = biasVal; } for (std::size_t inCh = 0; inCh < dims[2]; ++inCh) { - for (std::size_t sx = 0; sx < std::get<4>(params)[0]; ++sx) { - for (std::size_t sy = 0; sy < std::get<4>(params)[1]; ++sy) { + for (std::size_t sx = 0; sx < std::get<4>(attrs)[0]; ++sx) { + for (std::size_t sy = 0; sy < std::get<4>(attrs)[1]; ++sy) { const std::size_t wIndex = - outCh + std::get<3>(params) * (inCh + dims[2] * (sy + std::get<4>(params)[1] * sx)); + outCh + std::get<3>(attrs) * (inCh + dims[2] * (sy + std::get<4>(attrs)[1] * sx)); std::size_t iIndex = dims[3] * (inCh + dims[2] * ((iy + sy) + dims[1] * (ix + sx))); for (std::size_t batch = 0; batch < dims[3]; ++batch) { output[oIndex + batch] += weights[wIndex] * input[iIndex + batch]; @@ -88,53 +88,53 @@ void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Parameters &params, const s // output H size const std::size_t oxSize = - static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] + std::get<5>(params)[0] + std::get<5>(params)[2] - std::get<4>(params)[0] + std::get<0>(params)[0]) / - static_cast<float>(std::get<0>(params)[0]))); + static_cast<std::size_t>(std::floor(static_cast<float>(dims[2] + std::get<5>(attrs)[0] + std::get<5>(attrs)[2] - std::get<4>(attrs)[0] + std::get<0>(attrs)[0]) / +
static_cast<float>(std::get<0>(attrs)[0]))); // output W size const std::size_t oySize = - static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] + std::get<5>(params)[1] + std::get<5>(params)[3] - std::get<4>(params)[1] + std::get<0>(params)[1]) / - static_cast<float>(std::get<0>(params)[1]))); + static_cast<std::size_t>(std::floor(static_cast<float>(dims[3] + std::get<5>(attrs)[1] + std::get<5>(attrs)[3] - std::get<4>(attrs)[1] + std::get<0>(attrs)[1]) / + static_cast<float>(std::get<0>(attrs)[1]))); // TODO: kernel computation // output (batch, outCh, Xout, Yout) // input (batch, inCh, Xin, Yin) // weight (outCh, inCh, kernelX, kernelY) - // does not take Dilation parameter into account + // does not take Dilation attribute into account using signedsize = std::make_signed<std::size_t>::type; for (std::size_t batch = 0; batch < dims[0]; ++batch) { - for (std::size_t outCh = 0; outCh < std::get<3>(params); ++outCh) { - const std::size_t oIndex = (outCh + batch*std::get<3>(params)) * oxSize * oySize; + for (std::size_t outCh = 0; outCh < std::get<3>(attrs); ++outCh) { + const std::size_t oIndex = (outCh + batch*std::get<3>(attrs)) * oxSize * oySize; B biasVal = (biases != nullptr) ? 
biases[outCh] : B(0); std::fill(output + oIndex, output+(oIndex+oxSize*oySize), biasVal); for (std::size_t inCh = 0; inCh < dims[1]; ++inCh) { const std::size_t iIndex = (inCh + batch*dims[1]) * dims[2] * dims[3]; - const std::size_t wIndex = (inCh + outCh*dims[1]) * std::get<4>(params)[0] * std::get<4>(params)[1]; + const std::size_t wIndex = (inCh + outCh*dims[1]) * std::get<4>(attrs)[0] * std::get<4>(attrs)[1]; for (std::size_t ox = 0; ox < oxSize; ++ox) { - const signedsize difx = static_cast<signedsize>(std::get<5>(params)[0] - ox * std::get<0>(params)[0]); + const signedsize difx = static_cast<signedsize>(std::get<5>(attrs)[0] - ox * std::get<0>(attrs)[0]); const std::size_t sxMin = static_cast<std::size_t>(std::max(difx, signedsize(0))); - const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<4>(params)[0] ? std::get<4>(params)[0] : dims[2] + difx); + const std::size_t sxMax = (static_cast<signedsize>(dims[2]) + difx) < 0 ? 0 : ((dims[2] + difx) > std::get<4>(attrs)[0] ? std::get<4>(attrs)[0] : dims[2] + difx); for (std::size_t oy = 0; oy < oySize; ++oy) { - const signedsize dify = static_cast<signedsize>(std::get<5>(params)[1] - oy * std::get<0>(params)[1]); + const signedsize dify = static_cast<signedsize>(std::get<5>(attrs)[1] - oy * std::get<0>(attrs)[1]); const std::size_t syMin = static_cast<std::size_t>(std::max(dify, signedsize(0))); - const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<4>(params)[1] ? std::get<4>(params)[1] : dims[3] + dify); + const std::size_t syMax = (static_cast<signedsize>(dims[3]) + dify) < 0 ? 0 : ((dims[3] + dify) > std::get<4>(attrs)[1] ? 
std::get<4>(attrs)[1] : dims[3] + dify); const std::size_t oIndexFull = oIndex + ox*oySize + oy; - const signedsize ix = static_cast<signedsize>(ox * std::get<0>(params)[0]) - std::get<5>(params)[0]; - const signedsize iy = static_cast<signedsize>(oy * std::get<0>(params)[1]) - std::get<5>(params)[1]; + const signedsize ix = static_cast<signedsize>(ox * std::get<0>(attrs)[0]) - std::get<5>(attrs)[0]; + const signedsize iy = static_cast<signedsize>(oy * std::get<0>(attrs)[1]) - std::get<5>(attrs)[1]; if (sxMin == 0 && syMin == 0 && sxMax == 3 && syMax == 3) { - output[oIndexFull] += (weights[wIndex + 0*std::get<4>(params)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+0)] + - weights[wIndex + 0*std::get<4>(params)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+1)] + - weights[wIndex + 0*std::get<4>(params)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+2)] + - weights[wIndex + 1*std::get<4>(params)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+0)] + - weights[wIndex + 1*std::get<4>(params)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+1)] + - weights[wIndex + 1*std::get<4>(params)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+2)] + - weights[wIndex + 2*std::get<4>(params)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+0)] + - weights[wIndex + 2*std::get<4>(params)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+1)] + - weights[wIndex + 2*std::get<4>(params)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+2)]); + output[oIndexFull] += (weights[wIndex + 0*std::get<4>(attrs)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + 
static_cast<std::size_t>(iy+0)] + + weights[wIndex + 0*std::get<4>(attrs)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+1)] + + weights[wIndex + 0*std::get<4>(attrs)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+0)*dims[3] + static_cast<std::size_t>(iy+2)] + + weights[wIndex + 1*std::get<4>(attrs)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+0)] + + weights[wIndex + 1*std::get<4>(attrs)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+1)] + + weights[wIndex + 1*std::get<4>(attrs)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+1)*dims[3] + static_cast<std::size_t>(iy+2)] + + weights[wIndex + 2*std::get<4>(attrs)[1] + 0] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+0)] + + weights[wIndex + 2*std::get<4>(attrs)[1] + 1] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+1)] + + weights[wIndex + 2*std::get<4>(attrs)[1] + 2] * input[iIndex + static_cast<std::size_t>(ix+2)*dims[3] + static_cast<std::size_t>(iy+2)]); } else { for (std::size_t sx = sxMin; sx < sxMax; ++sx) { for (std::size_t sy = syMin; sy < syMax; ++sy) { - output[oIndexFull] += weights[wIndex + sx*std::get<4>(params)[1] + sy] * + output[oIndexFull] += weights[wIndex + sx*std::get<4>(attrs)[1] + sy] * input[iIndex + static_cast<std::size_t>(ix+static_cast<signedsize>(sx))*dims[3] + static_cast<std::size_t>(iy+static_cast<signedsize>(sy))]; } } diff --git a/include/aidge/backend/cpu/operator/FCImpl.hpp b/include/aidge/backend/cpu/operator/FCImpl.hpp index c69cc0b08a58877108c78d6f12c29e9089c2f665..1dfa40439dbba9cdd4fe3436fea30f771678c1ff 100644 --- a/include/aidge/backend/cpu/operator/FCImpl.hpp +++ b/include/aidge/backend/cpu/operator/FCImpl.hpp @@ -26,11 +26,11 @@ namespace Aidge { // compute kernel registry for forward and backward class FCImplForward_cpu : public 
Registrable<FCImplForward_cpu, std::tuple<DataType, DataType, DataType, DataType>, - void(const FC_Op::Parameters &, const DimSize_t, const DimSize_t, + void(const FC_Op::Attrs &, const DimSize_t, const DimSize_t, const void *, const void *, const void *, void *)> {}; class FCImplBackward_cpu : public Registrable<FCImplBackward_cpu, std::tuple<DataType, DataType, DataType, DataType>, - void(const FC_Op::Parameters &, const DimSize_t, const DimSize_t, + void(const FC_Op::Attrs &, const DimSize_t, const DimSize_t, const void *, const void *, const void *, void *)> {}; class FCImpl_cpu : public OperatorImpl { diff --git a/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp index d6acb7dfea3415a8d67384745e16ecdd8bf06324..91e2558a7ef1079cbc9fb11f78fab53ef4246149 100644 --- a/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp @@ -19,17 +19,17 @@ namespace Aidge { // template <class I, class W, class B, class O> -// void FCImpl_cpu_forward_kernel(const FC_Op::Parameters& params, const std::array<DimSize_t, 4>& dims, +// void FCImpl_cpu_forward_kernel(const FC_Op::Attrs& attrs, const std::array<DimSize_t, 4>& dims, // const void* input_, const void* weights_, const void* biases_, void* output_) { -// // FIXME: missing FC parameters as arguments +// // FIXME: missing FC attributes as arguments // const I* input = static_cast<const I*>(input_); // const W* weights = static_cast<const W*>(weights_); // const B* biases = static_cast<const B*>(biases_); // O* output = static_cast<O*>(output_); -// for (std::size_t outIdx = 0; outIdx < std::get<0>(params); ++outIdx) { +// for (std::size_t outIdx = 0; outIdx < std::get<0>(attrs); ++outIdx) { // std::size_t oIndex = outIdx * dims[3]; -// const B bias = std::get<1>(params) ? B(0) : biases[outIdx]; +// const B bias = std::get<1>(attrs) ? 
B(0) : biases[outIdx]; // for (std::size_t batch = 0; batch < dims[3]; ++batch) { // output[oIndex + batch] = bias; // } @@ -39,10 +39,10 @@ namespace Aidge { // for (std::size_t iy = 0; iy < dims[1]; ++iy) { // for (std::size_t inCh = 0; inCh < dims[2]; ++inCh) { // const std::size_t iIndex = dims[3] * (inCh + dims[2] * (iy + dims[1] * ix)); -// for (std::size_t outCh = 0; outCh < std::get<0>(params); ++outCh) { +// for (std::size_t outCh = 0; outCh < std::get<0>(attrs); ++outCh) { // const std::size_t oIndex = dims[3] * outCh; -// const std::size_t wIndex = (inCh + dims[2] * (iy + dims[1] * ix)) * std::get<0>(params) + -// outCh; // (iIndex*std::get<0>(params) + oIndex)/dims[3]; +// const std::size_t wIndex = (inCh + dims[2] * (iy + dims[1] * ix)) * std::get<0>(attrs) + +// outCh; // (iIndex*std::get<0>(attrs) + oIndex)/dims[3]; // for (std::size_t batch = 0; batch < dims[3]; ++batch) { // output[oIndex + batch] += weights[wIndex] * input[iIndex + batch]; // } @@ -53,9 +53,9 @@ namespace Aidge { // } // template <class I, class W, class B, class O> -// void FCImpl_cpu_forward_kernel(const FC_Op::Parameters& params, const std::array<DimSize_t, 2>& dims, +// void FCImpl_cpu_forward_kernel(const FC_Op::Attrs& attrs, const std::array<DimSize_t, 2>& dims, // const void* input_, const void* weights_, const void* biases_, void* output_) { -// // FIXME: missing FC parameters as arguments +// // FIXME: missing FC attributes as arguments // const I* input = static_cast<const I*>(input_); // const W* weights = static_cast<const W*>(weights_); // const B* biases = static_cast<const B*>(biases_); @@ -63,9 +63,9 @@ namespace Aidge { // // let's have I.dims() = [N, C, H, W] instead of [H, W, C, N] -// for (std::size_t outIdx = 0; outIdx < std::get<0>(params); ++outIdx) { +// for (std::size_t outIdx = 0; outIdx < std::get<0>(attrs); ++outIdx) { // std::size_t oIndex = outIdx * dims[0]; -// const B bias = std::get<1>(params) ? 
B(0) : biases[outIdx]; +// const B bias = std::get<1>(attrs) ? B(0) : biases[outIdx]; // for (std::size_t batch = 0; batch < dims[0]; ++batch) { // output[oIndex + batch] = bias; // } @@ -74,8 +74,8 @@ namespace Aidge { // for (std::size_t batch = 0; batch < dims[0]; ++batch) { // const std::size_t oIndex = dims[1] * batch; // for (std::size_t i = 0; i < dims[1]; ++i) { -// for (std::size_t outCh = 0; outCh < std::get<0>(params); ++outCh) { -// std::size_t wIndex = i * std::get<0>(params) + outCh; // (iIndex*std::get<0>(params) + oIndex)/dims[3]; +// for (std::size_t outCh = 0; outCh < std::get<0>(attrs); ++outCh) { +// std::size_t wIndex = i * std::get<0>(attrs) + outCh; // (iIndex*std::get<0>(attrs) + oIndex)/dims[3]; // output[oIndex + outCh] += weights[wIndex] * input[i + batch]; // } // } @@ -83,29 +83,29 @@ namespace Aidge { // } template <class I, class W, class B, class O> -void FCImpl_cpu_forward_kernel(const FC_Op::Parameters& params, const DimSize_t batchSize, const DimSize_t oneInputSize, +void FCImpl_cpu_forward_kernel(const FC_Op::Attrs& attrs, const DimSize_t batchSize, const DimSize_t oneInputSize, const void* input_, const void* weights_, const void* biases_, void* output_) { - // FIXME: missing FC parameters as arguments + // FIXME: missing FC attributes as arguments const I* input = static_cast<const I*>(input_); const W* weights = static_cast<const W*>(weights_); const B* biases = static_cast<const B*>(biases_); O* output = static_cast<O*>(output_); - if (std::get<1>(params)) { - std::fill(output, output+(batchSize*std::get<0>(params)), B(0)); + if (std::get<1>(attrs)) { + std::fill(output, output+(batchSize*std::get<0>(attrs)), B(0)); } else { for (std::size_t batch = 0; batch < batchSize; ++batch) { - std::copy(biases, biases+std::get<0>(params), output+(batch*std::get<0>(params))); + std::copy(biases, biases+std::get<0>(attrs), output+(batch*std::get<0>(attrs))); } } for (std::size_t batch = 0; batch < batchSize; ++batch) { - for (std::size_t 
out = 0; out < std::get<0>(params); ++out) { - output[out + batch*std::get<0>(params)] = std::inner_product(input + batch*oneInputSize, + for (std::size_t out = 0; out < std::get<0>(attrs); ++out) { + output[out + batch*std::get<0>(attrs)] = std::inner_product(input + batch*oneInputSize, input + (batch + 1)*oneInputSize, weights + out*oneInputSize, - output[out + batch*std::get<0>(params)]); + output[out + batch*std::get<0>(attrs)]); } } } diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp index abe167bea16de01f861beb9701f747d39f265d9d..386ef999fddbda184edee88723d213f53ff62ded 100644 --- a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp +++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp @@ -24,10 +24,10 @@ namespace Aidge { // compute kernel registry for forward and backward class LeakyReLUImplForward_cpu - : public Registrable<LeakyReLUImplForward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Parameters&, std::size_t, const void*, void*)> { + : public Registrable<LeakyReLUImplForward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Attrs&, std::size_t, const void*, void*)> { }; class LeakyReLUImplBackward_cpu - : public Registrable<LeakyReLUImplBackward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Parameters&, std::size_t, const void*, void*)> { + : public Registrable<LeakyReLUImplBackward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Attrs&, std::size_t, const void*, void*)> { }; class LeakyReLUImpl_cpu : public OperatorImpl { diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp index ff9a8ac6a8f968f244429b330401d794f16fac01..761b9579c3c3dc187e4b0fac24812fa77f916e65 100644 --- a/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp @@ 
-18,14 +18,14 @@ namespace Aidge { template <class I, class O> -void LeakyReLUImpl_cpu_forward_kernel(const LeakyReLU_Op::Parameters& params, +void LeakyReLUImpl_cpu_forward_kernel(const LeakyReLU_Op::Attrs& attrs, std::size_t inputLenght, const void* input_, void* output_) { const I* input = static_cast<const I*>(input_); O* output = static_cast<O*>(output_); - I negativeSlope = static_cast<I>(std::get<0>(params)); + I negativeSlope = static_cast<I>(std::get<0>(attrs)); for (std::size_t i = 0; i < inputLenght; ++i) { output[i] = input[i] >= 0 ? input[i] : input[i] * negativeSlope; diff --git a/include/aidge/backend/cpu/operator/ScalingImpl.hpp b/include/aidge/backend/cpu/operator/ScalingImpl.hpp index 6e75b6f42d565a481021bdbba17ee0e637f4707e..37549349b9f5ffbf443d976135db05b4cec209b7 100644 --- a/include/aidge/backend/cpu/operator/ScalingImpl.hpp +++ b/include/aidge/backend/cpu/operator/ScalingImpl.hpp @@ -18,16 +18,17 @@ #include "aidge/utils/Types.h" #include <memory> #include <vector> +#include <array> namespace Aidge { // class Scaling_Op; // compute kernel registry for forward and backward class ScalingImplForward_cpu - : public Registrable<ScalingImplForward_cpu, std::tuple<DataType, DataType>, void(const Scaling_Op::Parameters&, std::size_t, const void*, void*)> { + : public Registrable<ScalingImplForward_cpu, std::tuple<DataType, DataType>, void(const Scaling_Op::Attrs&, std::size_t, const void*, void*)> { }; class ScalingImplBackward_cpu - : public Registrable<ScalingImplBackward_cpu, std::tuple<DataType, DataType>, void(const Scaling_Op::Parameters&, std::size_t, const void*, void*)> { + : public Registrable<ScalingImplBackward_cpu, std::tuple<DataType, DataType>, void(const Scaling_Op::Attrs&, std::size_t, const void*, void*)> { }; class ScalingImpl_cpu : public OperatorImpl { diff --git a/include/aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp index 
c5b06290ee04ecf9759f418cd26d83e889fcc84e..8fe13bce3a4c470d77b083603d3b889a46fda71f 100644 --- a/include/aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/ScalingImpl_forward_kernels.hpp @@ -18,14 +18,14 @@ namespace Aidge { template <class I, class O> -void ScalingImpl_cpu_forward_kernel(const Scaling_Op::Parameters& params, +void ScalingImpl_cpu_forward_kernel(const Scaling_Op::Attrs& attrs, std::size_t inputLenght, const void* input_, void* output_) { const I* input = static_cast<const I*>(input_); O* output = static_cast<O*>(output_); - I scalingFactor = static_cast<I>(std::get<0>(params)); + const I& scalingFactor = static_cast<const I&>(std::get<0>(attrs)); for (std::size_t i = 0; i < inputLenght; ++i) { output[i] = input[i] * scalingFactor; diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp index 6c434a5c38853a1dee66db5be95b6b1bfdde8162..b1f82bbb4323a402d698d772966409e1a8f7224b 100644 --- a/src/operator/AvgPoolingImpl.cpp +++ b/src/operator/AvgPoolingImpl.cpp @@ -70,7 +70,7 @@ void Aidge::AvgPoolingImpl2D_cpu::forward() { Registrar<AvgPoolingImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()}); // Call kernel - kernelFunc(mOp.getParams(), + kernelFunc(mOp.getStaticAttributes(), mOp.getInput(0)->dims<4>(), mOp.getInput(0)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr()); diff --git a/src/operator/BatchNormImpl.cpp b/src/operator/BatchNormImpl.cpp index a0d4d032ded9ede1b2dba307aa967af330167d25..90ee2b7a2361166109568e317a1788137150a8d1 100644 --- a/src/operator/BatchNormImpl.cpp +++ b/src/operator/BatchNormImpl.cpp @@ -76,7 +76,7 @@ void Aidge::BatchNormImpl2D_cpu::forward() { mOp.getOutput(0)->dataType()}); // Call kernel - kernelFunc(mOp.getParams(), + kernelFunc(mOp.getStaticAttributes(), mOp.getInput(0)->dims<4>(), mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(), diff --git 
a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp index 3e920cf68366b82bce8df29c8aea0c838e6a1364..7801f64ef46ced22d95af47b8b0e8cc9888a81da 100644 --- a/src/operator/ConvDepthWiseImpl.cpp +++ b/src/operator/ConvDepthWiseImpl.cpp @@ -77,7 +77,7 @@ void Aidge::ConvDepthWiseImpl2D_cpu::forward() { mOp.getInput(2)->dataType(), mOp.getOutput(0)->dataType()}); // Call kernel - kernelFunc(mOp.getParams(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(), + kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(), mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(), mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr()); } diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp index b4ddf80929923a9c2c5998ac8614ebb0d3afe000..edab4432fd5792f27ea158f265641855532d6d0b 100644 --- a/src/operator/ConvImpl.cpp +++ b/src/operator/ConvImpl.cpp @@ -75,7 +75,7 @@ void Aidge::ConvImpl2D_cpu::forward() { mOp.getInput(2)->dataType(), mOp.getOutput(0)->dataType()}); // Call kernel - kernelFunc(mOp.getParams(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(), + kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(), mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(), mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr()); diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp index 086902be0ab1c2027a8c62c143bc27921e5e9e1b..3cf1ccf6e951ea05521ef67c99a3e628e0f620f5 100644 --- a/src/operator/FCImpl.cpp +++ b/src/operator/FCImpl.cpp @@ -98,7 +98,7 @@ void Aidge::FCImpl_cpu::forward() // Call kernel // if (mOp.getInput(0)->nbDims() == 4) { // kernelFunc( - // mOp.getParams(), + // mOp.getStaticAttributes(), // std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(), // mOp.getInput(0)->getImpl()->rawPtr(), // mOp.mInputs[1]->getImpl()->rawPtr(), @@ 
-107,7 +107,7 @@ void Aidge::FCImpl_cpu::forward() // } // else kernelFunc( - mOp.getParams(), + mOp.getStaticAttributes(), mOp.getInput(0)->dims()[0], mOp.getInput(0)->sizeM1(), mOp.getInput(0)->getImpl()->rawPtr(), diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp index f6a44d381081c7c7f1dcbbf02d91212168cc07aa..316d3641bb960ed8850a94f40186b77cc8522b58 100644 --- a/src/operator/LeakyReLUImpl.cpp +++ b/src/operator/LeakyReLUImpl.cpp @@ -65,7 +65,7 @@ void Aidge::LeakyReLUImpl_cpu::forward() { mOp.getOutput(0)->dataType()}); // Call kernel - kernelFunc(mOp.getParams(), + kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(), mOp.getInput(0)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr()); diff --git a/src/operator/ScalingImpl.cpp b/src/operator/ScalingImpl.cpp index c6a96f3bc8ea865da1c31ddfadff67c1e8556ad5..84cd6ee33a8316a24bae472c74c039dabe0afba3 100644 --- a/src/operator/ScalingImpl.cpp +++ b/src/operator/ScalingImpl.cpp @@ -68,7 +68,7 @@ void Aidge::ScalingImpl_cpu::forward() { mOp.getOutput(0)->dataType()}); // Call kernel - kernelFunc(mOp.getParams(), + kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(), mOp.getInput(0)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr()); diff --git a/unit_tests/operator/Test_LeakyReLUImpl.cpp b/unit_tests/operator/Test_LeakyReLUImpl.cpp index 7096962e196c2ace4abf2b0b14aca8dfa37d3441..d5bd91ff75404a7b928c8919c64e06315b78206f 100644 --- a/unit_tests/operator/Test_LeakyReLUImpl.cpp +++ b/unit_tests/operator/Test_LeakyReLUImpl.cpp @@ -153,7 +153,7 @@ TEST_CASE("[cpu/operator] LeakyReLU(forward)") { REQUIRE(*myLeakyReLU->getOperator()->getOutput(0) == *expectedOutput); } - SECTION("Test construction parameter: negative_slop") { + SECTION("Test construction attribute: negative_slop") { std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<float,10> { {0.0f, 1.0f, 2.0f,-3.0f, 
4.0f,-5.0f,-6.0f, 7.0f, 8.0f, 9.0f} });