diff --git a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp index 490598599aedf24b26865ce6a1ddb3fe32044b1b..221e36dcfac44e21d1b1a35674ca21403b4b57ab 100644 --- a/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/AddImpl_forward_kernels.hpp @@ -20,7 +20,7 @@ namespace Aidge { template <class I1, class O> void AddImpl1I_cpu_forward_kernel(const std::size_t inputLength, const void* input1_, void* output_) { - // FIXME: missing Add parameters as arguments + // FIXME: missing Add attributes as arguments const I1* input1 = static_cast<const I1*>(input1_); O* output = static_cast<O*>(output_); @@ -32,7 +32,7 @@ void AddImpl1I_cpu_forward_kernel(const std::size_t inputLength, const void* inp template <class I1, class I2, class O> void AddImpl2I_cpu_forward_kernel(const std::size_t inputLength, const void* input1_, const void* input2_, void* output_) { - // FIXME: missing Add parameters as arguments + // FIXME: missing Add attributes as arguments const I1* input1 = static_cast<const I1*>(input1_); const I2* input2 = static_cast<const I2*>(input2_); O* output = static_cast<O*>(output_); @@ -45,7 +45,7 @@ void AddImpl2I_cpu_forward_kernel(const std::size_t inputLength, const void* inp template <class I1, class I2, class I3, class O> void AddImpl3I_cpu_forward_kernel(const std::size_t inputLength, const void* input1_, const void* input2_, const void* input3_, void* output_) { - // FIXME: missing Add parameters as arguments + // FIXME: missing Add attributes as arguments const I1* input1 = static_cast<const I1*>(input1_); const I2* input2 = static_cast<const I2*>(input2_); const I3* input3 = static_cast<const I3*>(input3_); diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp index 6768a4f1f01941da51f9a22552b598493deac2c1..cfbcadfe6b719369618955a14c4cde5733ef6773 100644 --- 
a/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp +++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl.hpp @@ -29,11 +29,11 @@ namespace Aidge { class AvgPoolingImpl2DForward_cpu : public Registrable<AvgPoolingImpl2DForward_cpu, std::tuple<DataType, DataType>, - void(const AvgPooling_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *, void *)> {}; + void(const AvgPooling_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, void *)> {}; class AvgPoolingImpl2DBackward_cpu : public Registrable<AvgPoolingImpl2DBackward_cpu, std::tuple<DataType, DataType>, - void(const AvgPooling_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *, void *)> {}; + void(const AvgPooling_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, void *)> {}; class AvgPoolingImpl2D_cpu : public OperatorImpl { private: diff --git a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp index db87fe85fe6824464d9651c04b19e48ce21598b4..5e9104d663d0d9f78a0b70f2476895ea5b27c16e 100644 --- a/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/AvgPoolingImpl_forward_kernels.hpp @@ -26,17 +26,17 @@ namespace Aidge { * @brief Forward kernel for 2D AvgPoolingolution on CPU backend. * @tparam I Input data type. * @tparam O Output data type. - * @param params tuple of Parameters from the Operator + * @param params tuple of Attributes from the Operator * @param dims Array of input dimensions. * @param input_ const input Tensor. * @param output_ Output Tensor. 
*/ template <class I, class O> -void AvgPoolingImpl2D_cpu_forward_kernel(const AvgPooling_Op<2>::Params &params, +void AvgPoolingImpl2D_cpu_forward_kernel(const AvgPooling_Op<2>::Attrs &params, const std::array<DimSize_t, 4> &dims, const void *input_, void *output_) { - // FIXME: missing convolution parameters as arguments + // FIXME: missing convolution attributes as arguments const I *input = static_cast<const I *>(input_); O *output = static_cast<O *>(output_); @@ -54,7 +54,7 @@ void AvgPoolingImpl2D_cpu_forward_kernel(const AvgPooling_Op<2>::Params &params, // output (batch, outCh, Xout, Yout) // input (batch, ch, Xin, Yin) // weight (outCh, ch, kernelX, kernelY) - // does not take Dilation parameter into account + // does not take Dilation attribute into account using signedsize = std::make_signed<std::size_t>::type; for (std::size_t batch = 0; batch < dims[0]; ++batch) { for (std::size_t ch = 0; ch < dims[1]; ++ch) { diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp index 902dccf4345b74eb3b3561ae4dde115993826817..30557f6cbba05829b3cc9e17364ae4d933a568cf 100644 --- a/include/aidge/backend/cpu/operator/BatchNormImpl.hpp +++ b/include/aidge/backend/cpu/operator/BatchNormImpl.hpp @@ -29,7 +29,7 @@ namespace Aidge { class BatchNormImpl2DForward_cpu : public Registrable<BatchNormImpl2DForward_cpu, std::tuple<DataType, DataType, DataType>, - void(const BatchNorm_Op<2>::Params &, + void(const BatchNorm_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, const void *, @@ -41,7 +41,7 @@ class BatchNormImpl2DForward_cpu class BatchNormImpl2DBackward_cpu : public Registrable<BatchNormImpl2DBackward_cpu, std::tuple<DataType, DataType, DataType>, - void(const BatchNorm_Op<2>::Params &, + void(const BatchNorm_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, const void *, diff --git a/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp
b/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp index c3c2eb0036e2495007bc3f00a66e09d20ae28e4f..e46348f922a6c97453f1dd3c110bd0b0dc87cd99 100644 --- a/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/BatchNormImpl_forward_kernels.hpp @@ -27,7 +27,7 @@ namespace Aidge { * @tparam W Weight data type. * @tparam B Bias data type. * @tparam O Output data type. - * @param params tuple of Parameters from the Operator + * @param params tuple of Attributes from the Operator * @param dims Array of input dimensions. * @param input_ const input Tensor. * @param scale_ const scale Tensor. @@ -37,9 +37,9 @@ namespace Aidge { * @param output_ Output Tensor. */ template <class I, class P, class O> -void BatchNormImpl2D_cpu_forward_kernel(const BatchNorm_Op<2>::Params &params, const std::array<DimSize_t, 4> &dims, +void BatchNormImpl2D_cpu_forward_kernel(const BatchNorm_Op<2>::Attrs &params, const std::array<DimSize_t, 4> &dims, const void *input_, const void *scale_, const void *shift_, void *batchMean_, void *batchVar_, void *output_, const bool freeze) { - // FIXME: missing convolution parameters as arguments + // FIXME: missing convolution attributes as arguments const I *input = static_cast<const I *>(input_); const P *scale = static_cast<const P *>(scale_); const P *shift = static_cast<const P *>(shift_); diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp index bd18257e34ab481441005d78d8be4574e29f8d12..2826b635590c5d19f34c8e4beee20fc8dba2183b 100644 --- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp +++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl.hpp @@ -29,12 +29,12 @@ namespace Aidge { class ConvDepthWiseImpl2DForward_cpu : public Registrable<ConvDepthWiseImpl2DForward_cpu, std::tuple<DataType, DataType, DataType, DataType>, - void(const ConvDepthWise_Op<2>::Params &, const std::array<DimSize_t, 4> &,
const void *, + void(const ConvDepthWise_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, const void *, const void *, void *)> {}; class ConvDepthWiseImpl2DBackward_cpu : public Registrable<ConvDepthWiseImpl2DBackward_cpu, std::tuple<DataType, DataType, DataType, DataType>, - void(const ConvDepthWise_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *, + void(const ConvDepthWise_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, const void *, const void *, void *)> {}; class ConvDepthWiseImpl2D_cpu : public OperatorImpl { diff --git a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp index fb2559824866c855f923e3bc7ea40570cdd214d6..885115d54319bacb3089cad8cf1a0f59e2618ad7 100644 --- a/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/ConvDepthWiseImpl_forward_kernels.hpp @@ -27,7 +27,7 @@ namespace Aidge { * @tparam W Weight data type. * @tparam B Bias data type. * @tparam O Output data type. - * @param params tuple of Parameters from the Operator + * @param params tuple of Attributes from the Operator * @param dims Array of input dimensions. * @param input_ const input Tensor. * @param weights_ const weight Tensor. @@ -35,9 +35,9 @@ namespace Aidge { * @param output_ Output Tensor. 
*/ template <class I, class W, class B, class O> -void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Params &params, const std::array<DimSize_t, 4> &dims, +void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Attrs &params, const std::array<DimSize_t, 4> &dims, const void *input_, const void *weights_, const void *biases_, void *output_) { - // FIXME: missing convolution parameters as arguments + // FIXME: missing convolution attributes as arguments const I *input = static_cast<const I *>(input_); const W *weights = static_cast<const W *>(weights_); const B *biases = static_cast<const B *>(biases_); @@ -57,7 +57,7 @@ void ConvDepthWiseImpl2D_cpu_forward_kernel(const ConvDepthWise_Op<2>::Params &p // output (batch, outCh, Xout, Yout) // input (batch, ch, Xin, Yin) // weight (outCh, ch, kernelX, kernelY) - // does not take Dilation parameter into account + // does not take Dilation attribute into account using signedsize = std::make_signed<std::size_t>::type; for (std::size_t batch = 0; batch < dims[0]; ++batch) { for (std::size_t ch = 0; ch < std::get<2>(params); ++ch) { diff --git a/include/aidge/backend/cpu/operator/ConvImpl.hpp b/include/aidge/backend/cpu/operator/ConvImpl.hpp index eab08ff497790162c67a658c119316a00c18d559..b9411fe0f1ac079d9857cc8f2178fc98fadc3a77 100644 --- a/include/aidge/backend/cpu/operator/ConvImpl.hpp +++ b/include/aidge/backend/cpu/operator/ConvImpl.hpp @@ -29,12 +29,12 @@ namespace Aidge { class ConvImpl2DForward_cpu : public Registrable<ConvImpl2DForward_cpu, std::tuple<DataType, DataType, DataType, DataType>, - void(const Conv_Op<2>::Params &, const std::array<DimSize_t, 4> &, const void *, + void(const Conv_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, const void *, const void *, void *)> {}; class ConvImpl2DBackward_cpu : public Registrable<ConvImpl2DBackward_cpu, std::tuple<DataType, DataType, DataType, DataType>, - void(const Conv_Op<2>::Params &, const std::array<DimSize_t, 4> &,
const void *, + void(const Conv_Op<2>::Attrs &, const std::array<DimSize_t, 4> &, const void *, const void *, const void *, void *)> {}; class ConvImpl2D_cpu : public OperatorImpl { diff --git a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp index c9bf3b8db2a61d94ab174f2ce5083dfe4fab0cd7..5594927ebfc777ed2b9d9cd632a7e60e0bf8cca1 100644 --- a/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/ConvImpl_forward_kernels.hpp @@ -27,7 +27,7 @@ namespace Aidge { * @tparam W Weight data type. * @tparam B Bias data type. * @tparam O Output data type. - * @param params tuple of Parameters from the Operator + * @param params tuple of Attributes from the Operator * @param dims Array of input dimensions. * @param input_ const input Tensor. * @param weights_ const weight Tensor. @@ -35,9 +35,9 @@ namespace Aidge { * @param output_ Output Tensor. */ template <class I, class W, class B, class O> -void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Params &params, const std::array<DimSize_t, 4> &dims, +void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Attrs &params, const std::array<DimSize_t, 4> &dims, const void *input_, const void *weights_, const void *biases_, void *output_) { - // FIXME: missing convolution parameters as arguments + // FIXME: missing convolution attributes as arguments const I *input = static_cast<const I *>(input_); const W *weights = static_cast<const W *>(weights_); const B *biases = static_cast<const B *>(biases_); @@ -56,7 +56,7 @@ void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Params &params, const std:: // output (Xout, Yout, outCh, batch) // input (Xin, Yin, inCh, batch) // weight (kernelX, kernelY, inCh, outCh) - // does not take Dilation parameter into account + // does not take Dilation attribute into account for (std::size_t ox = 0; ox < oxSize; ++ox) { for (std::size_t oy = 0; oy < oySize; ++oy) { const
std::size_t ix = ox * std::get<0>(params)[0]; @@ -99,7 +99,7 @@ void ConvImpl2D_cpu_forward_kernel(const Conv_Op<2>::Params &params, const std:: // output (batch, outCh, Xout, Yout) // input (batch, inCh, Xin, Yin) // weight (outCh, inCh, kernelX, kernelY) - // does not take Dilation parameter into account + // does not take Dilation attribute into account using signedsize = std::make_signed<std::size_t>::type; for (std::size_t batch = 0; batch < dims[0]; ++batch) { for (std::size_t outCh = 0; outCh < std::get<3>(params); ++outCh) { diff --git a/include/aidge/backend/cpu/operator/FCImpl.hpp b/include/aidge/backend/cpu/operator/FCImpl.hpp index 2290573936ed6a8aec509d7fcfa276fc12bf60cb..1dfa40439dbba9cdd4fe3436fea30f771678c1ff 100644 --- a/include/aidge/backend/cpu/operator/FCImpl.hpp +++ b/include/aidge/backend/cpu/operator/FCImpl.hpp @@ -26,11 +26,11 @@ namespace Aidge { // compute kernel registry for forward and backward class FCImplForward_cpu : public Registrable<FCImplForward_cpu, std::tuple<DataType, DataType, DataType, DataType>, - void(const FC_Op::Params &, const DimSize_t, const DimSize_t, + void(const FC_Op::Attrs &, const DimSize_t, const DimSize_t, const void *, const void *, const void *, void *)> {}; class FCImplBackward_cpu : public Registrable<FCImplBackward_cpu, std::tuple<DataType, DataType, DataType, DataType>, - void(const FC_Op::Params &, const DimSize_t, const DimSize_t, + void(const FC_Op::Attrs &, const DimSize_t, const DimSize_t, const void *, const void *, const void *, void *)> {}; class FCImpl_cpu : public OperatorImpl { diff --git a/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp index 3e8b3e347225e1141916270f012d2a76bbef5abe..2b639a73c4e38be15c14137e6b291cef47d3396c 100644 --- a/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/FCImpl_forward_kernels.hpp @@ -19,9 +19,9 @@ namespace Aidge { // template <class I,
class W, class B, class O> -// void FCImpl_cpu_forward_kernel(const FC_Op::Params& params, const std::array<DimSize_t, 4>& dims, +// void FCImpl_cpu_forward_kernel(const FC_Op::Attrs& params, const std::array<DimSize_t, 4>& dims, // const void* input_, const void* weights_, const void* biases_, void* output_) { -// // FIXME: missing FC parameters as arguments +// // FIXME: missing FC attributes as arguments // const I* input = static_cast<const I*>(input_); // const W* weights = static_cast<const W*>(weights_); // const B* biases = static_cast<const B*>(biases_); @@ -53,9 +53,9 @@ namespace Aidge { // } // template <class I, class W, class B, class O> -// void FCImpl_cpu_forward_kernel(const FC_Op::Params& params, const std::array<DimSize_t, 2>& dims, +// void FCImpl_cpu_forward_kernel(const FC_Op::Attrs& params, const std::array<DimSize_t, 2>& dims, // const void* input_, const void* weights_, const void* biases_, void* output_) { -// // FIXME: missing FC parameters as arguments +// // FIXME: missing FC attributes as arguments // const I* input = static_cast<const I*>(input_); // const W* weights = static_cast<const W*>(weights_); // const B* biases = static_cast<const B*>(biases_); @@ -83,9 +83,9 @@ namespace Aidge { // } template <class I, class W, class B, class O> -void FCImpl_cpu_forward_kernel(const FC_Op::Params& params, const DimSize_t batchSize, const DimSize_t oneInputSize, +void FCImpl_cpu_forward_kernel(const FC_Op::Attrs& params, const DimSize_t batchSize, const DimSize_t oneInputSize, const void* input_, const void* weights_, const void* biases_, void* output_) { - // FIXME: missing FC parameters as arguments + // FIXME: missing FC attributes as arguments const I* input = static_cast<const I*>(input_); const W* weights = static_cast<const W*>(weights_); const B* biases = static_cast<const B*>(biases_); diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp index 
48a13a54da6d29a388ea4ca32311900662b4a3cd..386ef999fddbda184edee88723d213f53ff62ded 100644 --- a/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp +++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl.hpp @@ -24,10 +24,10 @@ namespace Aidge { // compute kernel registry for forward and backward class LeakyReLUImplForward_cpu - : public Registrable<LeakyReLUImplForward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Params&, std::size_t, const void*, void*)> { + : public Registrable<LeakyReLUImplForward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Attrs&, std::size_t, const void*, void*)> { }; class LeakyReLUImplBackward_cpu - : public Registrable<LeakyReLUImplBackward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Params&, std::size_t, const void*, void*)> { + : public Registrable<LeakyReLUImplBackward_cpu, std::tuple<DataType, DataType>, void(const LeakyReLU_Op::Attrs&, std::size_t, const void*, void*)> { }; class LeakyReLUImpl_cpu : public OperatorImpl { diff --git a/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp index 68d60b0b9deb0c9fbf6a8c6e116d0b4e681509db..a4a926e88d2f8871c541e170c59a1fefdbcdf467 100644 --- a/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp +++ b/include/aidge/backend/cpu/operator/LeakyReLUImpl_forward_kernels.hpp @@ -18,7 +18,7 @@ namespace Aidge { template <class I, class O> -void LeakyReLUImpl_cpu_forward_kernel(const LeakyReLU_Op::Params& params, +void LeakyReLUImpl_cpu_forward_kernel(const LeakyReLU_Op::Attrs& params, std::size_t inputLenght, const void* input_, void* output_) { diff --git a/src/operator/AvgPoolingImpl.cpp b/src/operator/AvgPoolingImpl.cpp index 137bf639242b66fa01ce4aef3cc0ddf8fece09b5..b1f82bbb4323a402d698d772966409e1a8f7224b 100644 --- a/src/operator/AvgPoolingImpl.cpp +++ b/src/operator/AvgPoolingImpl.cpp @@ -70,7 +70,7 @@ void 
Aidge::AvgPoolingImpl2D_cpu::forward() { Registrar<AvgPoolingImpl2DForward_cpu>::create({mOp.getInput(0)->dataType(), mOp.getOutput(0)->dataType()}); // Call kernel - kernelFunc(mOp.getStaticParameters(), + kernelFunc(mOp.getStaticAttributes(), mOp.getInput(0)->dims<4>(), mOp.getInput(0)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr()); diff --git a/src/operator/BatchNormImpl.cpp b/src/operator/BatchNormImpl.cpp index 9ced036e2debc7fae7c8d463acceccb34fbe8a3d..90ee2b7a2361166109568e317a1788137150a8d1 100644 --- a/src/operator/BatchNormImpl.cpp +++ b/src/operator/BatchNormImpl.cpp @@ -76,7 +76,7 @@ void Aidge::BatchNormImpl2D_cpu::forward() { mOp.getOutput(0)->dataType()}); // Call kernel - kernelFunc(mOp.getStaticParameters(), + kernelFunc(mOp.getStaticAttributes(), mOp.getInput(0)->dims<4>(), mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(), diff --git a/src/operator/ConvDepthWiseImpl.cpp b/src/operator/ConvDepthWiseImpl.cpp index 9e11b4eaa88169bd1620208c985fe3057f5322e3..7801f64ef46ced22d95af47b8b0e8cc9888a81da 100644 --- a/src/operator/ConvDepthWiseImpl.cpp +++ b/src/operator/ConvDepthWiseImpl.cpp @@ -77,7 +77,7 @@ void Aidge::ConvDepthWiseImpl2D_cpu::forward() { mOp.getInput(2)->dataType(), mOp.getOutput(0)->dataType()}); // Call kernel - kernelFunc(mOp.getStaticParameters(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(), + kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(), mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(), mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr()); } diff --git a/src/operator/ConvImpl.cpp b/src/operator/ConvImpl.cpp index 97e73ce5e0c349c6b77d9e6b826b68bb60035b18..edab4432fd5792f27ea158f265641855532d6d0b 100644 --- a/src/operator/ConvImpl.cpp +++ b/src/operator/ConvImpl.cpp @@ -75,7 +75,7 @@ void Aidge::ConvImpl2D_cpu::forward() { mOp.getInput(2)->dataType(), 
mOp.getOutput(0)->dataType()}); // Call kernel - kernelFunc(mOp.getStaticParameters(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(), + kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(), mOp.getInput(0)->getImpl()->rawPtr(), mOp.getInput(1)->getImpl()->rawPtr(), mOp.getInput(2)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr()); diff --git a/src/operator/FCImpl.cpp b/src/operator/FCImpl.cpp index 540ecdf33f2869b8fb6582b1ec55e2d780aad38d..3cf1ccf6e951ea05521ef67c99a3e628e0f620f5 100644 --- a/src/operator/FCImpl.cpp +++ b/src/operator/FCImpl.cpp @@ -98,7 +98,7 @@ void Aidge::FCImpl_cpu::forward() // Call kernel // if (mOp.getInput(0)->nbDims() == 4) { // kernelFunc( - // mOp.getStaticParameters(), + // mOp.getStaticAttributes(), // std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(), // mOp.getInput(0)->getImpl()->rawPtr(), // mOp.mInputs[1]->getImpl()->rawPtr(), @@ -107,7 +107,7 @@ void Aidge::FCImpl_cpu::forward() // } // else kernelFunc( - mOp.getStaticParameters(), + mOp.getStaticAttributes(), mOp.getInput(0)->dims()[0], mOp.getInput(0)->sizeM1(), mOp.getInput(0)->getImpl()->rawPtr(), diff --git a/src/operator/LeakyReLUImpl.cpp b/src/operator/LeakyReLUImpl.cpp index 46b7224fc8a1029724f098d263d0dcf4a2875751..316d3641bb960ed8850a94f40186b77cc8522b58 100644 --- a/src/operator/LeakyReLUImpl.cpp +++ b/src/operator/LeakyReLUImpl.cpp @@ -65,7 +65,7 @@ void Aidge::LeakyReLUImpl_cpu::forward() { mOp.getOutput(0)->dataType()}); // Call kernel - kernelFunc(mOp.getStaticParameters(), + kernelFunc(mOp.getStaticAttributes(), std::static_pointer_cast<Tensor>(mOp.getInput(0))->size(), mOp.getInput(0)->getImpl()->rawPtr(), mOp.getOutput(0)->getImpl()->rawPtr()); diff --git a/unit_tests/operator/Test_LeakyReLUImpl.cpp b/unit_tests/operator/Test_LeakyReLUImpl.cpp index 7096962e196c2ace4abf2b0b14aca8dfa37d3441..d5bd91ff75404a7b928c8919c64e06315b78206f 100644 --- 
a/unit_tests/operator/Test_LeakyReLUImpl.cpp +++ b/unit_tests/operator/Test_LeakyReLUImpl.cpp @@ -153,7 +153,7 @@ TEST_CASE("[cpu/operator] LeakyReLU(forward)") { REQUIRE(*myLeakyReLU->getOperator()->getOutput(0) == *expectedOutput); } - SECTION("Test construction parameter: negative_slop") { + SECTION("Test construction attribute: negative_slop") { std::shared_ptr<Tensor> input0 = std::make_shared<Tensor>(Array1D<float,10> { {0.0f, 1.0f, 2.0f,-3.0f, 4.0f,-5.0f,-6.0f, 7.0f, 8.0f, 9.0f} });