From 16f41fab26be81921143257a3c9e841ee45eff1f Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Wed, 4 Oct 2023 14:23:38 +0200
Subject: [PATCH] Fixed missing changes

---
 include/aidge/backend/cpu/operator/MatMulImpl.hpp |  4 ++--
 .../cpu/operator/MatMulImpl_forward_kernels.hpp   | 10 +++++-----
 src/operator/MatMulImpl.cpp                       |  4 ++--
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/include/aidge/backend/cpu/operator/MatMulImpl.hpp b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
index 437d0228..bf8e31ef 100644
--- a/include/aidge/backend/cpu/operator/MatMulImpl.hpp
+++ b/include/aidge/backend/cpu/operator/MatMulImpl.hpp
@@ -27,11 +27,11 @@ namespace Aidge {
 // compute kernel registry for forward and backward
 class MatMulImplForward_cpu
     : public Registrable<MatMulImplForward_cpu, std::tuple<DataType, DataType, DataType>,
-                         void(const MatMul_Op::Parameters &, const DimSize_t, const DimSize_t,
+                         void(const MatMul_Op::Attrs &, const DimSize_t, const DimSize_t,
                               const void *, const void *, void *)> {};
 class MatMulImplBackward_cpu
     : public Registrable<MatMulImplBackward_cpu, std::tuple<DataType, DataType, DataType>,
-                         void(const MatMul_Op::Parameters &, const DimSize_t, const DimSize_t,
+                         void(const MatMul_Op::Attrs &, const DimSize_t, const DimSize_t,
                               const void *, const void *, void *)> {};
 
 class MatMulImpl_cpu : public OperatorImpl {
diff --git a/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp
index 2ef7e433..bc52779e 100644
--- a/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/MatMulImpl_forward_kernels.hpp
@@ -20,7 +20,7 @@ namespace Aidge {
 
 
 template <class I, class W, class O>
-void MatMulImpl_cpu_forward_kernel(const MatMul_Op::Parameters& params, const DimSize_t batchSize, const DimSize_t oneInputSize,
+void MatMulImpl_cpu_forward_kernel(const MatMul_Op::Attrs& attrs, const DimSize_t batchSize, const DimSize_t oneInputSize,
                                    const void* input_, const void* weights_, void* output_) {
     // FIXME: missing MatMul parameters as arguments
     const I* input = static_cast<const I*>(input_);
@@ -28,14 +28,14 @@ void MatMulImpl_cpu_forward_kernel(const MatMul_Op::Parameters& params, const Di
     O* output = static_cast<O*>(output_);
 
-    std::fill(output, output+(batchSize*std::get<0>(params)), O(0));
+    std::fill(output, output+(batchSize*std::get<0>(attrs)), O(0));
 
     for (std::size_t batch = 0; batch < batchSize; ++batch) {
-        for (std::size_t out = 0; out < std::get<0>(params); ++out) {
-            output[out + batch*std::get<0>(params)] = std::inner_product(input + batch*oneInputSize,
+        for (std::size_t out = 0; out < std::get<0>(attrs); ++out) {
+            output[out + batch*std::get<0>(attrs)] = std::inner_product(input + batch*oneInputSize,
                                                        input + (batch + 1)*oneInputSize,
                                                        weights + out*oneInputSize,
-                                                       output[out + batch*std::get<0>(params)]);
+                                                       output[out + batch*std::get<0>(attrs)]);
         }
     }
 }
 
diff --git a/src/operator/MatMulImpl.cpp b/src/operator/MatMulImpl.cpp
index 47f11dca..972e1f0f 100644
--- a/src/operator/MatMulImpl.cpp
+++ b/src/operator/MatMulImpl.cpp
@@ -96,7 +96,7 @@ void Aidge::MatMulImpl_cpu::forward()
     // Call kernel
     // if (mOp.getInput(0)->nbDims() == 4) {
     //     kernelFunc(
-    //         mOp.getParams(),
+    //         mOp.getStaticAttributes(),
     //         std::static_pointer_cast<Tensor>(mOp.getInput(0))->dims<4>(),
     //         mOp.getInput(0)->getImpl()->rawPtr(),
     //         mOp.mInputs[1]->getImpl()->rawPtr(),
@@ -105,7 +105,7 @@
     // }
     // else
     kernelFunc(
-        mOp.getParams(),
+        mOp.getStaticAttributes(),
        mOp.getInput(0)->dims()[0],
        mOp.getInput(0)->sizeM1(),
        mOp.getInput(0)->getImpl()->rawPtr(),
-- 
GitLab
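
Editor's note: for reference, a minimal standalone sketch of the computation pattern used by MatMulImpl_cpu_forward_kernel above, where each output element is the inner product of one input row with one weight row, per batch. The sizes and names (batchSize, oneInputSize, nbOutputs) are illustrative placeholders and are not taken from the Aidge API.

// Standalone illustration (not Aidge code): batched inner products,
// mirroring the std::fill + std::inner_product structure of the kernel.
#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
    const std::size_t batchSize = 2, oneInputSize = 3, nbOutputs = 4;
    std::vector<float> input(batchSize * oneInputSize, 1.0f);   // all ones
    std::vector<float> weights(nbOutputs * oneInputSize, 2.0f); // all twos
    std::vector<float> output(batchSize * nbOutputs, 0.0f);     // zero-initialized accumulator

    for (std::size_t batch = 0; batch < batchSize; ++batch) {
        for (std::size_t out = 0; out < nbOutputs; ++out) {
            // output[batch][out] += dot(input[batch][:], weights[out][:])
            output[out + batch * nbOutputs] = std::inner_product(
                input.begin() + batch * oneInputSize,
                input.begin() + (batch + 1) * oneInputSize,
                weights.begin() + out * oneInputSize,
                output[out + batch * nbOutputs]);
        }
    }

    std::cout << output[0] << std::endl; // prints 6 (= 1*2 + 1*2 + 1*2)
    return 0;
}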