From 2a2ccd7c1517509c0a01a8415e04b51b51104eb5 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 4 Jul 2024 14:48:53 +0200
Subject: [PATCH] Fixed merge issues

---
 include/aidge/backend/cpu/operator/FoldImpl.hpp |  9 +++++++--
 .../cpu/operator/FoldImpl_forward_kernels.hpp   | 15 ++++++++-------
 src/operator/FoldImpl.cpp                       | 12 ++++++++----
 unit_tests/recipies/Test_ConvToMatMul.cpp       |  2 +-
 4 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/include/aidge/backend/cpu/operator/FoldImpl.hpp b/include/aidge/backend/cpu/operator/FoldImpl.hpp
index a0c7e509..61701138 100644
--- a/include/aidge/backend/cpu/operator/FoldImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FoldImpl.hpp
@@ -27,8 +27,13 @@ namespace Aidge {
 class FoldImpl2DForward_cpu
     : public Registrable<FoldImpl2DForward_cpu,
                          std::tuple<DataType, DataType>,
-                         void(const Fold_Op<2>::Attrs &, const std::vector<DimSize_t> &, const void *,
-                              void *)> {};
+                         void(const std::array<DimSize_t, 2>&,
+                              const std::array<DimSize_t, 2>&,
+                              const std::array<DimSize_t, 2>&,
+                              const std::array<DimSize_t, 2>&,
+                              const std::vector<DimSize_t> &,
+                              const void *,
+                              void *)> {};
 
 class FoldImpl2D_cpu : public OperatorImpl {
 public:
diff --git a/include/aidge/backend/cpu/operator/FoldImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/FoldImpl_forward_kernels.hpp
index e2db9475..3dba2319 100644
--- a/include/aidge/backend/cpu/operator/FoldImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/FoldImpl_forward_kernels.hpp
@@ -23,17 +23,18 @@
 namespace Aidge {
 
 template <class I, class O>
-void FoldImpl2D_cpu_forward_kernel(const Fold_Op<2>::Attrs &attrs, const std::vector<DimSize_t> &dims,
-                                   const void *input_, void *output_)
+void FoldImpl2D_cpu_forward_kernel(const std::array<DimSize_t, 2>& outputDims,
+                                   const std::array<DimSize_t, 2>& strideDims,
+                                   const std::array<DimSize_t, 2>& dilationDims,
+                                   const std::array<DimSize_t, 2>& kernelDims,
+                                   const std::vector<DimSize_t> &dims,
+                                   const void *input_, void *output_)
 {
     const I *input = static_cast<const I *>(input_);
     O *output = static_cast<O *>(output_);
 
-    const auto kernelDims = std::get<3>(attrs);
-    const auto dilationDims = std::get<2>(attrs);
-    const auto strideDims = std::get<1>(attrs);
-    const DimSize_t inHeight = std::get<0>(attrs)[0];
-    const DimSize_t inWidth = std::get<0>(attrs)[1];
+    const DimSize_t inHeight = outputDims[0];
+    const DimSize_t inWidth = outputDims[1];
 
     const DimSize_t kernelExtentHeight = dilationDims[0] *
                                              (kernelDims[0] - 1) + 1;
diff --git a/src/operator/FoldImpl.cpp b/src/operator/FoldImpl.cpp
index bcb0b4b0..532ba946 100644
--- a/src/operator/FoldImpl.cpp
+++ b/src/operator/FoldImpl.cpp
@@ -30,8 +30,12 @@ void Aidge::FoldImpl2D_cpu::forward() {
         Registrar<FoldImpl2DForward_cpu>::create({std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
         std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
     // Call kernel
-    kernelFunc(dynamic_cast<const Fold_Op<2>&>(mOp).getStaticAttributes(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
-               getCPUPtr(mOp.getRawInput(0)),
-               getCPUPtr(mOp.getRawOutput(0)));
+    const auto& op_ = static_cast<const Fold_Op<2>&>(mOp);
+    kernelFunc(op_.outputDims(),
+               op_.strideDims(),
+               op_.dilationDims(),
+               op_.kernelDims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/unit_tests/recipies/Test_ConvToMatMul.cpp b/unit_tests/recipies/Test_ConvToMatMul.cpp
index e815b103..05c5eef8 100644
--- a/unit_tests/recipies/Test_ConvToMatMul.cpp
+++ b/unit_tests/recipies/Test_ConvToMatMul.cpp
@@ -67,7 +67,7 @@ TEST_CASE("[ConvToMatMul] conv") {
     // Simplify the graph: freeze parameters to allow reshaping of the Producers
     for (auto node : g2->getNodes()) {
         if (node->type() == Producer_Op::Type && node->name() != "dataProvider") {
-            std::static_pointer_cast<Producer_Op>(node->getOperator())->getAttr<bool>("Constant") = true;
+            std::static_pointer_cast<Producer_Op>(node->getOperator())->constant() = true;
         }
     }
 
-- 
GitLab