From d26753dfea52bf5344baec2b33307fea7da02b6f Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Mon, 2 Oct 2023 11:31:01 +0200
Subject: [PATCH] Renamed parameter to attribute

---
 .../backend/cpu/operator/AddImpl_forward_kernels.hpp  |  6 +++---
 .../aidge/backend/cpu/operator/AvgPoolingImpl.hpp     |  4 ++--
 .../cpu/operator/AvgPoolingImpl_forward_kernels.hpp   |  8 ++++----
 include/aidge/backend/cpu/operator/BatchNormImpl.hpp  |  4 ++--
 .../cpu/operator/BatchNormImpl_forward_kernels.hpp    |  6 +++---
 .../aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp  |  4 ++--
 .../operator/ConvDepthWiseImpl_forward_kernels.hpp    |  8 ++++----
 include/aidge/backend/cpu/operator/ConvImpl.hpp       |  4 ++--
 .../cpu/operator/ConvImpl_forward_kernels.hpp         | 10 +++++-----
 include/aidge/backend/cpu/operator/FCImpl.hpp         |  4 ++--
 .../backend/cpu/operator/FCImpl_forward_kernels.hpp   | 12 ++++++------
 include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp  |  4 ++--
 .../cpu/operator/LeakyReLUImpl_forward_kernels.hpp    |  2 +-
 src/operator/AvgPoolingImpl.cpp                       |  2 +-
 src/operator/BatchNormImpl.cpp                        |  2 +-
 src/operator/ConvDepthWiseImpl.cpp                    |  2 +-
 src/operator/ConvImpl.cpp                             |  2 +-
 src/operator/FCImpl.cpp                               |  4 ++--
 src/operator/LeakyReLUImpl.cpp                        |  2 +-
 unit_tests/operator/Test_LeakyReLUImpl.cpp            |  2 +-
 20 files changed, 46 insertions(+), 46 deletions(-)

diff --git a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp
index 49059859..221e36dc 100644
--- a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp
@@ -20,7 +20,7 @@ namespace Aidge {
 template <class I1, class O>
 void AddImpl1I_cpu_forward_kernel(const std::size_t inputLength, const void* input1_, void* output_) {
-    // FIXME: missing Add parameters as arguments
+    // FIXME: missing Add attributes as arguments
     const I1* input1 = static_cast<const I1*>(input1_);
     O* output = static_cast<O*>(output_);
@@ -32,7 +32,7 @@ void AddImpl1I_cpu_forward_kernel(const std::size_t inputLength, const void* inp
 template <class I1, class I2, class O>
 void AddImpl2I_cpu_forward_kernel(const std::size_t inputLength, const void* input1_, const void* input2_, void* output_) {
-    // FIXME: missing Add parameters as arguments
+    // FIXME: missing Add attributes as arguments
     const I1* input1 = static_cast<const I1*>(input1_);
     const I2* input2 = static_cast<const I2*>(input2_);
     O* output = static_cast<O*>(output_);
@@ -45,7 +45,7 @@ void AddImpl2I_cpu_forward_kernel(const std::size_t inputLength, const void* inp
 template <class I1, class I2, class I3, class O>
 void AddImpl3I_cpu_forward_kernel(const std::size_t inputLength, const void* input1_, const void* input2_, const void* input3_, void* output_) {
-    // FIXME: missing Add parameters as arguments
+    // FIXME: missing Add attributes as arguments
     const I1* input1 = static_cast<const I1*>(input1_);
     const I2* input2 = static_cast<const I2*>(input2_);
     const I3* input3 = static_cast<const I3*>(input3_);
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
index 6768a4f1..cfbcadfe 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp
@@ -29,11 +29,11 @@ namespace Aidge {
 class AvgPoolingImpl2DForward_cpu
     : public Registrable<AvgPoolingImpl2DForward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const AvgPooling_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *, void *)> {};
+                         void(const AvgPooling_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, void *)> {};
 class AvgPoolingImpl2DBackward_cpu
     : public Registrable<AvgPoolingImpl2DBackward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const AvgPooling_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *, void *)> {};
+                         void(const AvgPooling_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, void *)> {};
 
 class AvgPoolingImpl2D_cpu : public OperatorImpl {
   private:
diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
index db87fe85..5e9104d6 100644
--- a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp
@@ -26,17 +26,17 @@ namespace Aidge {
  * @brief Forward kernel for 2D AvgPoolingolution on CPU backend.
  * @tparam I Input data type.
  * @tparam O Output data type.
- * @param params tuple of Parameters from the Operator
+ * @param params tuple of Attributes from the Operator
  * @param dims Array of input dimensions.
  * @param input_ const input Tensor.
  * @param output_ Output Tensor.
  */
 template <class I, class O>
-void AvgPoolingImpl2D_cpu_forward_kernel(const AvgPooling_Op<2>::Params &params,
+void AvgPoolingImpl2D_cpu_forward_kernel(const AvgPooling_Op<2>::Attrs &params,
                                          const std::array<DimSize_t, 4> &dims,
                                          const void *input_,
                                          void *output_) {
-    // FIXME: missing convolution parameters as arguments
+    // FIXME: missing convolution attributes as arguments
     const I *input = static_cast<const I *>(input_);
     O *output = static_cast<O *>(output_);
@@ -54,7 +54,7 @@ void AvgPoolingImpl2D_cpu_forward_kernel(const AvgPooling_Op<2>::Params &params,
     // output (batch, outCh, Xout, Yout)
     // input (batch, ch, Xin, Yin)
     // weight (outCh, ch, kernelX, kernelY)
-    // does not take Dilation parameter into account
+    // does not take Dilation attribute into account
     using signedsize = std::make_signed<std::size_t>::type;
     for (std::size_t batch = 0; batch < dims[0]; ++batch) {
         for (std::size_t ch = 0; ch < dims[1]; ++ch) {
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
index 902dccf4..30557f6c 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp
@@ -29,7 +29,7 @@ namespace Aidge {
 class BatchNormImpl2DForward_cpu
     : public Registrable<BatchNormImpl2DForward_cpu,
                          std::tuple<DataType, DataType, DataType>,
-                         void(const BatchNorm_Op<2>::Params &,
+                         void(const BatchNorm_Op<2>::Attrs &,
                               const std::array<DimSize_t, 4> &,
                               const void *,
                               const void *,
@@ -41,7 +41,7 @@ class BatchNormImpl2DBackward_cpu
     : public Registrable<BatchNormImpl2DBackward_cpu,
                          std::tuple<DataType, DataType, DataType>,
-                         void(const BatchNorm_Op<2>::Params &,
+                         void(const BatchNorm_Op<2>::Attrs &,
                               const std::array<DimSize_t, 4> &,
                               const void *,
                               const void *,
diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp
index c3c2eb00..e46348f9 100644
--- a/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp
@@ -27,7 +27,7 @@ namespace Aidge {
  * @tparam W Weight data type.
  * @tparam B Bias data type.
  * @tparam O Output data type.
- * @param params tuple of Parameters from the Operator
+ * @param params tuple of Attributes from the Operator
  * @param dims Array of input dimensions.
  * @param input_ const input Tensor.
  * @param scale_ const scale Tensor.
@@ -37,9 +37,9 @@ namespace Aidge {
  * @param output_ Output Tensor.
  */
 template <class I, class P, class O>
-void BatchNormImpl2D_cpu_forward_kernel(const BatchNorm_Op<2>::Params &params, const std::array<DimSize_t, 4> &dims,
+void BatchNormImpl2D_cpu_forward_kernel(const BatchNorm_Op<2>::Attrs &params, const std::array<DimSize_t, 4> &dims,
                                         const void *input_, const void *scale_, const void *shift_, void *batchMean_, void *batchVar_, void *output_, const bool freeze) {
-    // FIXME: missing convolution parameters as arguments
+    // FIXME: missing convolution attributes as arguments
     const I *input = static_cast<const I *>(input_);
     const P *scale = static_cast<const P *>(scale_);
     const P *shift = static_cast<const P *>(shift_);
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
index bd18257e..2826b635 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp
@@ -29,12 +29,12 @@ namespace Aidge {
 class ConvDepthWiseImpl2DForward_cpu
     : public Registrable<ConvDepthWiseImpl2DForward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const ConvDepthWise_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *,
+                         void(const ConvDepthWise_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *,
                               const void *, const void *, void *)> {};
 class ConvDepthWiseImpl2DBackward_cpu
     : public Registrable<ConvDepthWiseImpl2DBackward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const ConvDepthWise_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *,
+                         void(const ConvDepthWise_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *,
                               const void *, const void *, void *)> {};
 
 class ConvDepthWiseImpl2D_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
index fb255982..885115d5 100644
--- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp
@@ -27,7 +27,7 @@ namespace Aidge {
  * @tparam W Weight data type.
  * @tparam B Bias data type.
  * @tparam O Output data type.
- * @param params tuple of Parameters from the Operator
+ * @param params tuple of Attributes from the Operator
  * @param dims Array of input dimensions.
  * @param input_ const input Tensor.
  * @param weights_ const weight Tensor.
@@ -35,9 +35,9 @@ namespace Aidge {
  * @param output_ Output Tensor.
  */
 template <class I, class W, class B, class O>
-void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Params &params, const std::array<DimSize_t, 4> &dims,
+void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Attrs &params, const std::array<DimSize_t, 4> &dims,
                                             const void *input_, const void *weights_, const void *biases_, void *output_) {
-    // FIXME: missing convolution parameters as arguments
+    // FIXME: missing convolution attributes as arguments
     const I *input = static_cast<const I *>(input_);
     const W *weights = static_cast<const W *>(weights_);
     const B *biases = static_cast<const B *>(biases_);
@@ -57,7 +57,7 @@ void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Params &p
     // output (batch, outCh, Xout, Yout)
     // input (batch, ch, Xin, Yin)
     // weight (outCh, ch, kernelX, kernelY)
-    // does not take Dilation parameter into account
+    // does not take Dilation attribute into account
     using signedsize = std::make_signed<std::size_t>::type;
     for (std::size_t batch = 0; batch < dims[0]; ++batch) {
         for (std::size_t ch = 0; ch < std::get<2>(params); ++ch) {
diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp
index eab08ff4..b9411fe0 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp
@@ -29,12 +29,12 @@ namespace Aidge {
 class ConvImpl2DForward_cpu
     : public Registrable<ConvImpl2DForward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const Conv_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *,
+                         void(const Conv_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *,
                               const void *, const void *, void *)> {};
 class ConvImpl2DBackward_cpu
     : public Registrable<ConvImpl2DBackward_cpu,
                          std::tuple<DataType, DataType, DataType, DataType>,
-                         void(const Conv_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *,
+                         void(const Conv_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *,
                               const void *, const void *, void *)> {};
 
 class ConvImpl2D_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
index c9bf3b8d..5594927e 100644
--- a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp
@@ -27,7 +27,7 @@ namespace Aidge {
  * @tparam W Weight data type.
  * @tparam B Bias data type.
  * @tparam O Output data type.
- * @param params tuple of Parameters from the Operator
+ * @param params tuple of Attributes from the Operator
  * @param dims Array of input dimensions.
  * @param input_ const input Tensor.
  * @param weights_ const weight Tensor.
@@ -35,9 +35,9 @@ namespace Aidge {
  * @param output_ Output Tensor.
  */
 template <class I, class W, class B, class O>
-void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Params &params, const std::array<DimSize_t, 4> &dims,
+void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Attrs &params, const std::array<DimSize_t, 4> &dims,
                                    const void *input_, const void *weights_, const void *biases_, void *output_) {
-    // FIXME: missing convolution parameters as arguments
+    // FIXME: missing convolution attributes as arguments
     const I *input = static_cast<const I *>(input_);
     const W *weights = static_cast<const W *>(weights_);
     const B *biases = static_cast<const B *>(biases_);
@@ -56,7 +56,7 @@ void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Params &params, const std::
     // output (Xout, Yout, outCh, batch)
     // input (Xin, Yin, inCh, batch)
     // weight (kernelX, kernelY, inCh, outCh)
-    // does not take Dilation parameter into account
+    // does not take Dilation attribute into account
     for (std::size_t ox = 0; ox < oxSize; ++ox) {
         for (std::size_t oy = 0; oy < oySize; ++oy) {
             const std::size_t ix = ox * std::get<0>(params)[0];
@@ -99,7 +99,7 @@ void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Params &params, const std::
     // output (batch, outCh, Xout, Yout)
     // input (batch, inCh, Xin, Yin)
     // weight (outCh, inCh, kernelX, kernelY)
-    // does not take Dilation parameter into account
+    // does not take Dilation attribute into account
     using signedsize = std::make_signed<std::size_t>::type;
     for (std::size_t batch = 0; batch < dims[0]; ++batch) {
         for (std::size_t outCh = 0; outCh < std::get<3>(params); ++outCh) {
diff --git a/include/aidge/backend/cpu/operator/FCImpl.hpp b/include/aidge/backend/cpu/operator/FCImpl.hpp
index 22905739..1dfa4043 100644
--- a/include/aidge/backend/cpu/operator/FCImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl.hpp
@@ -26,11 +26,11 @@ namespace Aidge {
 // compute kernel registry for forward and backward
 class FCImplForward_cpu : public Registrable<FCImplForward_cpu,
                                              std::tuple<DataType, DataType, DataType, DataType>,
-                                             void(const FC_Op::Params &, const DimSize_t, const DimSize_t,
+                                             void(const FC_Op::Attrs &, const DimSize_t, const DimSize_t,
                                                   const void *, const void *, const void *, void *)> {};
 class FCImplBackward_cpu : public Registrable<FCImplBackward_cpu,
                                               std::tuple<DataType, DataType, DataType, DataType>,
-                                              void(const FC_Op::Params &, const DimSize_t, const DimSize_t,
+                                              void(const FC_Op::Attrs &, const DimSize_t, const DimSize_t,
                                                    const void *, const void *, const void *, void *)> {};
 
 class FCImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp
index 3e8b3e34..2b639a73 100644
--- a/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp
@@ -19,9 +19,9 @@ namespace Aidge {
 // template <class I, class W, class B, class O>
-// void FCImpl_cpu_forward_kernel(const FC_Op::Params& params, const std::array<DimSize_t, 4>& dims,
+// void FCImpl_cpu_forward_kernel(const FC_Op::Attrs& params, const std::array<DimSize_t, 4>& dims,
 //                                const void* input_, const void* weights_, const void* biases_, void* output_) {
-//     // FIXME: missing FC parameters as arguments
+//     // FIXME: missing FC attributes as arguments
 //     const I* input = static_cast<const I*>(input_);
 //     const W* weights = static_cast<const W*>(weights_);
 //     const B* biases = static_cast<const B*>(biases_);
@@ -53,9 +53,9 @@ namespace Aidge {
 // }
 // template <class I, class W, class B, class O>
-// void FCImpl_cpu_forward_kernel(const FC_Op::Params& params, const std::array<DimSize_t, 2>& dims,
+// void FCImpl_cpu_forward_kernel(const FC_Op::Attrs& params, const std::array<DimSize_t, 2>& dims,
 //                                const void* input_, const void* weights_, const void* biases_, void* output_) {
-//     // FIXME: missing FC parameters as arguments
+//     // FIXME: missing FC attributes as arguments
 //     const I* input = static_cast<const I*>(input_);
 //     const W* weights = static_cast<const W*>(weights_);
 //     const B* biases = static_cast<const B*>(biases_);
@@ -83,9 +83,9 @@ namespace Aidge {
 // }
 template <class I, class W, class B, class O>
-void FCImpl_cpu_forward_kernel(const FC_Op::Params& params, const DimSize_t batchSize, const DimSize_t oneInputSize,
+void FCImpl_cpu_forward_kernel(const FC_Op::Attrs& params, const DimSize_t batchSize, const DimSize_t oneInputSize,
                                const void* input_, const void* weights_, const void* biases_, void* output_) {
-    // FIXME: missing FC parameters as arguments
+    // FIXME: missing FC attributes as arguments
     const I* input = static_cast<const I*>(input_);
     const W* weights = static_cast<const W*>(weights_);
     const B* biases = static_cast<const B*>(biases_);
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
index 48a13a54..386ef999 100644
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp
@@ -24,10 +24,10 @@ namespace Aidge {
 // compute kernel registry for forward and backward
 class LeakyReLUImplForward_cpu
-    : public Registrable<LeakyReLUImplForward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Params&, std::size_t, const void*, void*)> {
+    : public Registrable<LeakyReLUImplForward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Attrs&, std::size_t, const void*, void*)> {
 };
 class LeakyReLUImplBackward_cpu
-    : public Registrable<LeakyReLUImplBackward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Params&, std::size_t, const void*, void*)> {
+    : public Registrable<LeakyReLUImplBackward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Attrs&, std::size_t, const void*, void*)> {
 };
 
 class LeakyReLUImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp
index 68d60b0b..a4a926e8 100644
--- a/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp
@@ -18,7 +18,7 @@ namespace Aidge {
 template <class I, class O>
-void LeakyReLUImpl_cpu_forward_kernel(const LeakyReLU_Op::Params& params,
+void LeakyReLUImpl_cpu_forward_kernel(const LeakyReLU_Op::Attrs& params,
                                      std::size_t inputLenght,
                                      const void* input_,
                                      void* output_) {
diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp
index 137bf639..b1f82bbb 100644
--- a/src/operator/AvgPoolingImpl.cpp
+++ b/src/operator/AvgPoolingImpl.cpp
@@ -70,7 +70,7 @@ void Aidge::AvgPoolingImpl2D_cpu::forward() {
         Registrar<AvgPoolingImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()});
 
     // Call kernel
-    kernelFunc(mOp.getStaticParameters(),
+    kernelFunc(mOp.getStaticAttributes(),
                mOp.getInput(0)->dims<4>(),
                mOp.getInput(0)->getImpl()->rawPtr(),
                mOp.getOutput(0)->getImpl()->rawPtr());
diff --git a/src/operator/BatchNormImpl.cpp b/src/operator/BatchNormImpl.cpp
index 9ced036e..90ee2b7a 100644
--- a/src/operator/BatchNormImpl.cpp
+++ b/src/operator/BatchNormImpl.cpp
@@ -76,7 +76,7 @@ void Aidge::BatchNormImpl2D_cpu::forward() {
                                                      mOp.getOutput(0)->dataType()});
 
     // Call kernel
-    kernelFunc(mOp.getStaticParameters(),
+    kernelFunc(mOp.getStaticAttributes(),
                mOp.getInput(0)->dims<4>(),
                mOp.getInput(0)->getImpl()->rawPtr(),
                mOp.getInput(1)->getImpl()->rawPtr(),
diff --git a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp
index 9e11b4ea..7801f64e 100644
--- a/src/operator/ConvDepthWiseImpl.cpp
+++ b/src/operator/ConvDepthWiseImpl.cpp
@@ -77,7 +77,7 @@ void Aidge::ConvDepthWiseImpl2D_cpu::forward() {
                                                         mOp.getInput(2)->dataType(),
                                                         mOp.getOutput(0)->dataType()});
     // Call kernel
-    kernelFunc(mOp.getStaticParameters(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
+    kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
                mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
                mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
 }
diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp
index 97e73ce5..edab4432 100644
--- a/src/operator/ConvImpl.cpp
+++ b/src/operator/ConvImpl.cpp
@@ -75,7 +75,7 @@ void Aidge::ConvImpl2D_cpu::forward() {
                                               mOp.getInput(2)->dataType(),
                                               mOp.getOutput(0)->dataType()});
     // Call kernel
-    kernelFunc(mOp.getStaticParameters(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
+    kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
                mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(),
                mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr());
diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp
index 540ecdf3..3cf1ccf6 100644
--- a/src/operator/FCImpl.cpp
+++ b/src/operator/FCImpl.cpp
@@ -98,7 +98,7 @@ void Aidge::FCImpl_cpu::forward()
     // Call kernel
     // if (mOp.getInput(0)->nbDims() == 4) {
     //     kernelFunc(
-    //         mOp.getStaticParameters(),
+    //         mOp.getStaticAttributes(),
    //         std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
     //         mOp.getInput(0)->getImpl()->rawPtr(),
     //         mOp.mInputs[1]->getImpl()->rawPtr(),
@@ -107,7 +107,7 @@ void Aidge::FCImpl_cpu::forward()
     // }
     // else
     kernelFunc(
-        mOp.getStaticParameters(),
+        mOp.getStaticAttributes(),
         mOp.getInput(0)->dims()[0],
         mOp.getInput(0)->sizeM1(),
         mOp.getInput(0)->getImpl()->rawPtr(),
diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp
index 46b7224f..316d3641 100644
--- a/src/operator/LeakyReLUImpl.cpp
+++ b/src/operator/LeakyReLUImpl.cpp
@@ -65,7 +65,7 @@ void Aidge::LeakyReLUImpl_cpu::forward() {
         mOp.getOutput(0)->dataType()});
 
     // Call kernel
-    kernelFunc(mOp.getStaticParameters(),
+    kernelFunc(mOp.getStaticAttributes(),
         std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(),
         mOp.getInput(0)->getImpl()->rawPtr(),
         mOp.getOutput(0)->getImpl()->rawPtr());
diff --git a/unit_tests/operator/Test_LeakyReLUImpl.cpp b/unit_tests/operator/Test_LeakyReLUImpl.cpp
index 7096962e..d5bd91ff 100644
--- a/unit_tests/operator/Test_LeakyReLUImpl.cpp
+++ b/unit_tests/operator/Test_LeakyReLUImpl.cpp
@@ -153,7 +153,7 @@ TEST_CASE("[cpu/operator] LeakyReLU(forward)") {
         REQUIRE(*myLeakyReLU->getOperator()->getOutput(0) == *expectedOutput);
     }
 
-    SECTION("Test construction parameter: negative_slop") {
+    SECTION("Test construction attribute: negative_slop") {
         std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<float,10> {
             {0.0f, 1.0f, 2.0f,-3.0f, 4.0f,-5.0f,-6.0f, 7.0f, 8.0f, 9.0f}
         });
-- 
GitLab
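
For readers unfamiliar with the convention this rename touches, the following standalone C++ sketch (not part of the patch, and not the real Aidge headers) illustrates the pattern: an operator's compile-time settings live in a std::tuple type alias, kernels take that tuple by const reference, and callers fetch it through an accessor. Only the names Attrs and getStaticAttributes mirror identifiers renamed above; the LeakyReLU_Op struct, the slope value, and everything else here are simplified stand-ins rather than the library's actual API.

// Standalone mock of the pattern touched by this patch: the operator's static
// settings are a std::tuple alias (renamed Params -> Attrs) handed to the kernel
// by const reference. Only the names Attrs / getStaticAttributes come from the
// diff; the rest of this file is a simplified illustration, not Aidge code.
#include <array>
#include <cstddef>
#include <iostream>
#include <tuple>

struct LeakyReLU_Op {
    using Attrs = std::tuple<float>;   // formerly "Params"; holds {negative_slope}
    Attrs mAttrs{0.01f};
    const Attrs& getStaticAttributes() const { return mAttrs; }  // formerly getStaticParameters()
};

// The kernel receives the attribute tuple first, then sizes and data pointers,
// mirroring the post-rename signatures in the headers above (simplified to float).
void LeakyReLUImpl_cpu_forward_kernel(const LeakyReLU_Op::Attrs& attrs,
                                      std::size_t inputLength,
                                      const float* input,
                                      float* output) {
    const float slope = std::get<0>(attrs);
    for (std::size_t i = 0; i < inputLength; ++i) {
        output[i] = (input[i] >= 0.0f) ? input[i] : slope * input[i];
    }
}

int main() {
    LeakyReLU_Op op;
    std::array<float, 4> in{-2.0f, -0.5f, 0.5f, 2.0f};
    std::array<float, 4> out{};
    LeakyReLUImpl_cpu_forward_kernel(op.getStaticAttributes(), in.size(), in.data(), out.data());
    for (float v : out) {
        std::cout << v << ' ';   // prints: -0.02 -0.005 0.5 2
    }
    std::cout << '\n';
    return 0;
}

The real kernels in the diff take type-erased const void* buffers and static_cast them to the template data types; the float-only version above only keeps the example short.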