From bd42c132038d4135bd8dd04b0ed23601d57d8cd8 Mon Sep 17 00:00:00 2001
From: Mickael GUIBERT <mickael.guibert@cea.fr>
Date: Tue, 1 Apr 2025 14:08:26 +0000
Subject: [PATCH] [Feat] add: unit test for Conv2D operator with int32_t data

---
 unit_tests/operator/Test_ConvImpl.cpp | 75 +++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)

diff --git a/unit_tests/operator/Test_ConvImpl.cpp b/unit_tests/operator/Test_ConvImpl.cpp
index 59ec16dd..854789e3 100644
--- a/unit_tests/operator/Test_ConvImpl.cpp
+++ b/unit_tests/operator/Test_ConvImpl.cpp
@@ -21,6 +21,7 @@
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/Conv.hpp"
 #include "aidge/utils/TensorUtils.hpp"
+#include "aidge/operator/Pad.hpp"
 
 using namespace Aidge;
 
@@ -1646,6 +1647,80 @@ TEST_CASE("[cpu/operator] Conv(forward)", "[Conv][CPU]") {
             REQUIRE(approxEq<float>(*(conv_op.getOutput(0)),*expectedOutput, 1e-5f, 1e-6f));
         }
     }
+
+    SECTION("kernel size [7,7]") {
+        SECTION("stride [2,2], no dilation, with padding (3,3,3,3)") {
+            Conv_Op<2> conv_op = Conv_Op<2>({7,7}, {2,2});
+            std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array1D<int32_t,3*4*4> {
+               {
+               54, 46, 32, 24, 18, 13, 13, 17, 22, 8, 34, 37,
+               37, 36, 30, 31, 28, 32, 32, 29, 29, 24, 18, 16,
+               57, 63, 57, 42, 30, 20, 17, 30, 41, 52, 46, 38,
+               65, 52, 60, 60, 59, 61, 65, 70, 69, 69, 71, 67
+               }
+            });
+            myInput->resize(std::vector<std::size_t>({1,4,4,3})); // values above are in NHWC order
+            myInput->setDataFormat(DataFormat::NHWC);  // declare the current layout
+            myInput->setDataFormat(DataFormat::NCHW);  // convert (transpose) the data to NCHW
+            std::shared_ptr<Tensor> myBiases = std::make_shared<Tensor>(Array1D<int32_t,1> {
+                {18300}
+            });
+            std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int32_t,1,3,7,7> {
+                {{{{   0,   0,  -1,   0,   1,   0,  -1},
+                    {   0,   0,   0,   1,   1,   0,  -1},
+                    {   0,   0,   0,   1,   1,   1,   0},
+                    {   0,   1,   1,   0,   1,   1,   0},
+                    {   0,   1,   1,   1,   1,   1,   0},
+                    {   0,   1,   1,   1,   1,   0,  -1},
+                    {  -1,   0,   1,   2,   2,   0,  -1}},
+
+                   {{   0,   0,  -1,   0,   0,   0,  -1},
+                    {   0,   0,   0,   1,   1,   0,   0},
+                    {   0,   0,   1,   1,   1,   1,   0},
+                    {   0,   1,   1,   1,   1,   1,   1},
+                    {   0,   1,   1,   1,   1,   1,   0},
+                    {   0,   1,   1,   0,   1,   0,   0},
+                    {  -1,   0,   1,   1,   1,   0,  -1}},
+
+                   {{   0,  -1,  -1,   0,   1,   0,  -1},
+                    {   0,   1,   1,   2,   2,   1,   0},
+                    {   0,   1,   1,   2,   2,   1,   1},
+                    {   0,   1,   1,   1,   1,   1,   2},
+                    {  -1,   1,   1,   0,   1,   1,   1},
+                    {  -1,   1,   1,   0,   0,   0,   0},
+                    {  -1,   0,   1,   1,   1,   0,   0}}}}
+            });
+            std::shared_ptr<Tensor> expectedOutput = std::make_shared<Tensor>(Array1D<int32_t,1> {
+                {
+                   19282
+                }
+            });
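+            // Padding is applied by a standalone Pad operator (Conv_Op itself has
+            // no padding attribute); its output is fed to the convolution input.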
+            Pad_Op<2> pad_op = Pad_Op<2>({3, 3, 0, 0});
+            pad_op.setBackend("cpu");
+            pad_op.associateInput(0,myInput);
+            pad_op.setDataType(DataType::Int32);
+            pad_op.forwardDims();
+            pad_op.forward();
+
+            conv_op.associateInput(0, pad_op.getOutput(0));
+            conv_op.associateInput(1, myWeights);
+            conv_op.associateInput(2, myBiases);
+            conv_op.setBackend("cpu");
+            conv_op.setDataType(DataType::Int32);
+            conv_op.forwardDims();
+            conv_op.forward();
+            conv_op.getOutput(0)->resize(std::vector<std::size_t>({1})); // flatten to 1-D to match expectedOutput
+            //conv_op.getOutput(0)->print();
+            //fmt::print("{:.^20}\n", "truth");
+            //(*expectedOutput).print();
+            REQUIRE(*(conv_op.getOutput(0)) == *expectedOutput);
+        }
+    }
+
 }
 
 template <DimSize_t DIM>
-- 
GitLab