diff --git a/include/aidge/backend/cpu/operator/FoldImpl.hpp b/include/aidge/backend/cpu/operator/FoldImpl.hpp
index a0c7e509cddbd1b33b8360aed4a8bbce4a39dcac..61701138b0cc1c7f0b7dcea0609ca0d463137e08 100644
--- a/include/aidge/backend/cpu/operator/FoldImpl.hpp
+++ b/include/aidge/backend/cpu/operator/FoldImpl.hpp
@@ -27,8 +27,13 @@ namespace Aidge {
 
 class FoldImpl2DForward_cpu
     : public Registrable<FoldImpl2DForward_cpu, std::tuple<DataType, DataType>,
-                         void(const Fold_Op<2>::Attrs &, const std::vector<DimSize_t> &, const void *,
-                              void *)> {};
+                         void(const std::array<DimSize_t, 2>&,
+                              const std::array<DimSize_t, 2>&,
+                              const std::array<DimSize_t, 2>&,
+                              const std::array<DimSize_t, 2>&,
+                              const std::vector<DimSize_t> &,
+                              const void *,
+                              void *)> {};
 
 class FoldImpl2D_cpu : public OperatorImpl {
 public:
diff --git a/include/aidge/backend/cpu/operator/FoldImpl_forward_kernels.hpp b/include/aidge/backend/cpu/operator/FoldImpl_forward_kernels.hpp
index e2db9475cb452df6b43df02bbeff67741c1f71fa..3dba2319af62fb3dfb2fa75ae9c592ee7ff88e65 100644
--- a/include/aidge/backend/cpu/operator/FoldImpl_forward_kernels.hpp
+++ b/include/aidge/backend/cpu/operator/FoldImpl_forward_kernels.hpp
@@ -23,17 +23,18 @@ namespace Aidge {
 
 template <class I, class O>
-void FoldImpl2D_cpu_forward_kernel(const Fold_Op<2>::Attrs &attrs, const std::vector<DimSize_t> &dims,
-                                   const void *input_, void *output_)
+void FoldImpl2D_cpu_forward_kernel(const std::array<DimSize_t, 2>& outputDims,
+                                   const std::array<DimSize_t, 2>& strideDims,
+                                   const std::array<DimSize_t, 2>& dilationDims,
+                                   const std::array<DimSize_t, 2>& kernelDims,
+                                   const std::vector<DimSize_t> &dims,
+                                   const void *input_, void *output_)
 {
     const I *input = static_cast<const I *>(input_);
     O *output = static_cast<O *>(output_);
 
-    const auto kernelDims = std::get<3>(attrs);
-    const auto dilationDims = std::get<2>(attrs);
-    const auto strideDims = std::get<1>(attrs);
-    const DimSize_t inHeight = std::get<0>(attrs)[0];
-    const DimSize_t inWidth = std::get<0>(attrs)[1];
+    const DimSize_t inHeight = outputDims[0];
+    const DimSize_t inWidth = outputDims[1];
 
     const DimSize_t kernelExtentHeight = dilationDims[0] *
                                              (kernelDims[0] - 1) +
                                          1;
diff --git a/src/operator/FoldImpl.cpp b/src/operator/FoldImpl.cpp
index bcb0b4b029f4c8cd899e123f62519987fc432a3d..532ba946ab8a615a4ba0cb162faca28f1ca6c550 100644
--- a/src/operator/FoldImpl.cpp
+++ b/src/operator/FoldImpl.cpp
@@ -30,8 +30,12 @@ void Aidge::FoldImpl2D_cpu::forward() {
         Registrar<FoldImpl2DForward_cpu>::create({std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dataType(),
                                                   std::static_pointer_cast<Tensor>(mOp.getRawOutput(0))->dataType()});
     // Call kernel
-    kernelFunc(dynamic_cast<const Fold_Op<2>&>(mOp).getStaticAttributes(),
-               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
-               getCPUPtr(mOp.getRawInput(0)),
-               getCPUPtr(mOp.getRawOutput(0)));
+    const auto& op_ = static_cast<const Fold_Op<2>&>(mOp);
+    kernelFunc(op_.outputDims(),
+               op_.strideDims(),
+               op_.dilationDims(),
+               op_.kernelDims(),
+               std::static_pointer_cast<Tensor>(mOp.getRawInput(0))->dims(),
+               getCPUPtr(mOp.getRawInput(0)),
+               getCPUPtr(mOp.getRawOutput(0)));
 }
diff --git a/unit_tests/recipies/Test_ConvToMatMul.cpp b/unit_tests/recipies/Test_ConvToMatMul.cpp
index e815b1035f4653a20d757798408ca53b6565f589..05c5eef83394ba8c965dfabae2bcd8c2b4502c79 100644
--- a/unit_tests/recipies/Test_ConvToMatMul.cpp
+++ b/unit_tests/recipies/Test_ConvToMatMul.cpp
@@ -67,7 +67,7 @@ TEST_CASE("[ConvToMatMul] conv") {
     // Simplify the graph: freeze parameters to allow reshaping of the Producers
     for (auto node : g2->getNodes()) {
         if (node->type() == Producer_Op::Type && node->name() != "dataProvider") {
-            std::static_pointer_cast<Producer_Op>(node->getOperator())->getAttr<bool>("Constant") = true;
+            std::static_pointer_cast<Producer_Op>(node->getOperator())->constant() = true;
         }
     }
 
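Note: since the Registrable functor type changes, any concrete kernel registration must match the new explicit-parameter signature. Below is a minimal sketch, not part of this diff, of what such a registration could look like, assuming the static-Registrar pattern used by the other CPU kernels in this backend; the registrar variable name is purely illustrative.

    // Hypothetical example only: register the templated forward kernel for
    // Float32 input/output under the new functor type
    // void(const std::array<DimSize_t, 2>&, ..., const void*, void*).
    // Assumes FoldImpl.hpp and FoldImpl_forward_kernels.hpp are included.
    namespace {
    static Aidge::Registrar<Aidge::FoldImpl2DForward_cpu> registrarFoldImpl2DForward_cpu_Float32(
            {Aidge::DataType::Float32, Aidge::DataType::Float32},
            Aidge::FoldImpl2D_cpu_forward_kernel<float, float>);
    }  // namespace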