diff --git a/unit_tests/operator/Test_ConvImpl.cpp b/unit_tests/operator/Test_ConvImpl.cpp
index 59ec16dd80ee98c09c79d5943c503e945abf5cdb..854789e386797bf1ea6c7e18eb9ed1d5525d5063 100644
--- a/unit_tests/operator/Test_ConvImpl.cpp
+++ b/unit_tests/operator/Test_ConvImpl.cpp
@@ -21,6 +21,7 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/utils/TensorUtils.hpp"
+#include "aidge/operator/Pad.hpp"
 
 using namespace Aidge;
 
@@ -1646,6 +1647,76 @@ TEST_CASE("[cpu/operator] Conv(forward)", "[Conv][CPU]") {
             REQUIRE(approxEq<float>(*(conv_op.getOutput(0)),*expectedOutput, 1e-5f, 1e-6f));
         }
     }
+
+    SECTION("kernel size [7,7]") {
+        SECTION("stride [2,2], no dilation, with padding (3,3,3,3)") {
+            Conv_Op<2> conv_op = Conv_Op<2>({7,7}, {2,2});
+            std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array1D<int32_t,3*4*4> {
+                {
+                    54, 46, 32, 24, 18, 13, 13, 17, 22, 8, 34, 37,
+                    37, 36, 30, 31, 28, 32, 32, 29, 29, 24, 18, 16,
+                    57, 63, 57, 42, 30, 20, 17, 30, 41, 52, 46, 38,
+                    65, 52, 60, 60, 59, 61, 65, 70, 69, 69, 71, 67
+                }
+            });
+            myInput->resize(std::vector<std::size_t>({1,4,4,3}));
+            myInput->setDataFormat(DataFormat::NHWC);
+            myInput->setDataFormat(DataFormat::NCHW);
+            std::shared_ptr<Tensor> myBiases = std::make_shared<Tensor>(Array1D<int32_t,1> {
+                {18300}
+            });
+            std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int32_t,1,3,7,7> {
+                {{{{ 0, 0, -1, 0, 1, 0, -1},
+                   { 0, 0, 0, 1, 1, 0, -1},
+                   { 0, 0, 0, 1, 1, 1, 0},
+                   { 0, 1, 1, 0, 1, 1, 0},
+                   { 0, 1, 1, 1, 1, 1, 0},
+                   { 0, 1, 1, 1, 1, 0, -1},
+                   { -1, 0, 1, 2, 2, 0, -1}},
+
+                  {{ 0, 0, -1, 0, 0, 0, -1},
+                   { 0, 0, 0, 1, 1, 0, 0},
+                   { 0, 0, 1, 1, 1, 1, 0},
+                   { 0, 1, 1, 1, 1, 1, 1},
+                   { 0, 1, 1, 1, 1, 1, 0},
+                   { 0, 1, 1, 0, 1, 0, 0},
+                   { -1, 0, 1, 1, 1, 0, -1}},
+
+                  {{ 0, -1, -1, 0, 1, 0, -1},
+                   { 0, 1, 1, 2, 2, 1, 0},
+                   { 0, 1, 1, 2, 2, 1, 1},
+                   { 0, 1, 1, 1, 1, 1, 2},
+                   { -1, 1, 1, 0, 1, 1, 1},
+                   { -1, 1, 1, 0, 0, 0, 0},
+                   { -1, 0, 1, 1, 1, 0, 0}}}}
+            });
+            std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int32_t,1> {
+                {
+                    19282
+                }
+            });
+            Pad_Op<2> pad_op = Pad_Op<2>({3,3,3,3});
+            pad_op.setBackend("cpu");
+            pad_op.associateInput(0, myInput);
+            pad_op.setDataType(DataType::Int32);
+            pad_op.forwardDims();
+            pad_op.forward();
+
+            conv_op.associateInput(0, pad_op.getOutput(0));
+            conv_op.associateInput(1, myWeights);
+            conv_op.associateInput(2, myBiases);
+            conv_op.setBackend("cpu");
+            conv_op.setDataType(DataType::Int32);
+            conv_op.forwardDims();
+            conv_op.forward();
+            conv_op.getOutput(0)->resize(std::vector<std::size_t>({1}));
+            //conv_op.getOutput(0)->print();
+            //fmt::print("{:.^20}\n", "truth");
+            //(*expectedOutput).print();
+            REQUIRE(*(conv_op.getOutput(0)) == *expectedOutput);
+        }
+    }
+
 }
 
 template <DimSize_t DIM>