From 1c8db798d9763bfe07911a1f83fccb78cefe7b4b Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Mon, 27 Nov 2023 13:22:55 +0000
Subject: [PATCH] [Fix] Conv and ConvDepthWise computeReceptiveField() member
 function

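Replace the removed `mInputs`/`mOutput` members with the `getInput()`
accessor and the `mOutputs` array in Conv_Op::computeReceptiveField(),
fix the weight and bias dimensions (both are indexed by the number of
output channels, outputDims[1], not by the batch size), and re-enable
the previously commented-out ConvDepthWise_Op::computeReceptiveField().
In the depth-wise case the input receptive field keeps the output
channel index, since each output channel depends only on the matching
input channel.

Along each spatial axis, both member functions compute the usual
dilated-convolution relation:

    inputDim = (outputDim - 1) * stride + 1 + (kernelDim - 1) * dilation

A minimal usage sketch (hypothetical variable `conv`; assumes a 2-D
operator whose output dims have already been forwarded):

    // receptive field of a 3x3 output patch over 4 output channels of
    // the first batch element, starting at flat output index 0
    auto fields = conv->computeReceptiveField(0, {1, 4, 3, 3});
    // fields[0]: data index/dims, fields[1]: weights, fields[2]: bias
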
---
 include/aidge/operator/Conv.hpp          | 28 ++++----
 include/aidge/operator/ConvDepthWise.hpp | 85 ++++++++++++++----------
 2 files changed, 65 insertions(+), 48 deletions(-)

diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 5fbd1c052..6d81352ce 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -77,9 +77,9 @@ public:
     }
 
     // Data operator[](const char* inputName) override final {
-    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
-    //         (strcmp(inputName, "weight") ? mInputs[1] :
-    //         (strcmp(inputName, "bias") ? mInputs[2] :
+    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? getInput(0) :
+    //         (strcmp(inputName, "weight") ? getInput(1) :
+    //         (strcmp(inputName, "bias") ? getInput(2) :
     //         nullptr));
     //     assert((in!=nullptr) && "No such parameter");
     //     return *in;
@@ -125,12 +125,12 @@ std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveFiel
         }
         if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
             // Offset
-            const auto outputIdxDims = mOutput->getCoord(firstIdx);
+            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
             auto inputIdxDims = outputIdxDims; // batch idx is the same
             inputIdxDims[1] = 0; // each channel is used so start with the first one
 
             for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-                if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
                     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
                 }
             }
@@ -138,8 +138,8 @@ std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveFiel
             // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
             // Input
             // same batch value, every input channel is used
-            std::vector<DimSize_t> inputDims{outputDims[0], mInputs[0]->dims()[1]};
-                        for (DimIdx_t i = 0; i < DIM; ++i) {
+            std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
+            for (DimIdx_t i = 0; i < DIM; ++i) {
                 inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
                             * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
                             + 1
@@ -150,20 +150,22 @@ std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveFiel
 
             // Weight
             // same output value, every input channel is used
-            std::vector<DimSize_t> weightDims{outputDims[0], mInputs[0]->dims()[1]};
-            weightDims.insert(weightDims.end(), this->template getAttr<ConvAttr::KernelDims>()[0], this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(DIM)]);
+            std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
+            for (std::size_t i = 0; i < DIM; ++i) {
+                weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
+            }
             std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
             weightIdxDims[0] = outputIdxDims[1];
 
             // Bias
-            const std::vector<DimSize_t> biasDims{outputDims[0]};
+            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channels
             const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
 
             // Result
             std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[1]->getIdx(weightIdxDims), weightDims));
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[2]->getIdx(biasIdxDims), biasDims));
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(0)->getIdx(inputIdxDims), inputDims));
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(1)->getIdx(weightIdxDims), weightDims));
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(2)->getIdx(biasIdxDims), biasDims));
             return res;
         }
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index ca6401e0e..d351e41dd 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -115,41 +115,56 @@ public:
         }
     }
 
-    // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
-    //     if (outputIdx != 0) {
-    //         AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-    //     }
-    //     if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
-    //         // Offset
-    //         const auto outputIdxDims = mOutput->getCoord(firstIdx);
-    //         auto inputIdxDims = outputIdxDims; // batch idx is the same
-
-    //         for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-    //             if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-    //                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
-    //             }
-    //         }
-
-    //         // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-    //         // Width
-    //         std::vector<DimSize_t> inputDims;
-    //         inputDims.push_back(outputDims[0]); // same batch value
-    //         inputDims.push_back(outputDims[1]); // same channel value
-
-    //         for (DimIdx_t i = 0; i < DIM; ++i) {
-    //             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-    //                         * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
-    //                         + 1
-    //                         + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-    //                         * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-    //             inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
-    //         }
-    //         std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>();
-    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
-    //         return res;
-    //     }
-    //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
-    // }
+    std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+        if (outputIdx != 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "ConvDepthWise_Op Operator has got only one output Tensor.");
+        }
+        if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+            // Offset
+            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
+            auto inputIdxDims = outputIdxDims; // batch idx is the same
+            inputIdxDims[1] = outputIdxDims[1]; // each output channel depends only on the matching input channel
+
+            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+                }
+            }
+
+            // padding is not a parameter of ConvDepthWise_Op. It is handled in Pad_Op Operator
+            // Input
+            // same batch value, same channel value
+            std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
+            for (DimIdx_t i = 0; i < DIM; ++i) {
+                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+                            * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                            + 1
+                            + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                            * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+                inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
+            }
+
+            // Weight
+            std::vector<DimSize_t> weightDims{outputDims[1], 1};
+            for (std::size_t i = 0; i < DIM; ++i) {
+                weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
+            }
+            std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
+            weightIdxDims[0] = outputIdxDims[1];
+
+            // Bias
+            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channels
+            const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
+
+            // Result
+            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(0)->getIdx(inputIdxDims), inputDims));
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(1)->getIdx(weightIdxDims), weightDims));
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(2)->getIdx(biasIdxDims), biasDims));
+            return res;
+        }
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    }
 
     void setBackend(const std::string &name) override {
         mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
-- 
GitLab