From dd21faee98903a0d08accfe0ada3144847fc2d96 Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Fri, 24 Nov 2023 14:24:01 +0000
Subject: [PATCH] [WIP] uncomment 'computeReceptiveField()' member function

---
 include/aidge/operator/AvgPooling.hpp     | 72 +++++++++--------
 include/aidge/operator/Conv.hpp           | 98 +++++++++++------------
 include/aidge/operator/Operator.hpp       |  9 ---
 include/aidge/operator/OperatorTensor.hpp | 10 +++
 src/operator/Operator.cpp                 | 23 ------
 src/operator/OperatorTensor.cpp           | 25 ++++++
 unit_tests/operator/Test_Conv_Op.cpp      | 95 +++++++++++-----------
 7 files changed, 172 insertions(+), 160 deletions(-)

diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index f0f9f6c54..ca2980821 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -94,40 +94,44 @@ public:
     }

-    // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
-    //     if (outputIdx != 0) {
-    //         AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-    //     }
-    //     if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
-    //         // Offset
-    //         const auto outputIdxDims = mOutput->getCoord(firstIdx);
-    //         std::vector<DimSize_t> inputIdxDims = outputIdxDims;
-
-    //         for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-    //             if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-    //                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
-    //             }
-    //         }
-
-    //         // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-    //         // Width
-    //         std::vector<DimSize_t> inputDims;
-    //         inputDims.push_back(outputDims[0]); // same batch value
-    //         inputDims.push_back(outputDims[1]); // same channel value
-
-    //         for (DimIdx_t i = 0; i < DIM; ++i) {
-    //             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-    //                         * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
-    //                         + 1
-    //                         + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
-    //             inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
-    //         }
-    //         std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>();
-    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInput->getIdx(inputIdxDims), inputDims));
-    //         return res;
-    //     }
-    //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
-    // }
+    std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>
+    computeReceptiveField(const std::size_t firstIdx,
+                          const std::vector<DimSize_t>& outputDims,
+                          const IOIndex_t outputIdx = 0) const override final
+    {
+        if (outputIdx != 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "AvgPooling_Op Operator has only one output Tensor.");
+        }
+        if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+            // Offset
+            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
+            std::vector<DimSize_t> inputIdxDims = outputIdxDims;
+
+            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+                }
+            }
+
+            // padding is not an attribute of AvgPooling_Op; it is handled by the Pad_Op Operator
+            // Input
+            std::vector<DimSize_t> inputDims;
+            inputDims.push_back(outputDims[0]); // same batch value
+            inputDims.push_back(outputDims[1]); // same channel value
+
+            for (DimIdx_t i = 0; i < DIM; ++i) {
+                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+                            * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                            + 1
+                            + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
+                inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
+            }
+            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
+            return res;
+        }
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    }

     void setBackend(const std::string &name) override {
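Note on the arithmetic above: along each spatial axis, producing o pooled outputs with a window of size k and stride s requires (o - 1) * s + k input elements (written as (o - 1) * s + 1 + (k - 1) in the loop), and the first input coordinate is the first output coordinate multiplied by the stride. A minimal standalone sketch of that formula; plain C++, the helper name is illustrative and not part of Aidge:

    #include <cassert>
    #include <cstddef>

    // Input extent needed along one spatial axis:
    // (o - 1) full strides plus one kernel window.
    std::size_t requiredInputExtent(std::size_t o, std::size_t k, std::size_t s) {
        return (o - 1) * s + k;
    }

    int main() {
        assert(requiredInputExtent(10, 5, 1) == 14); // 10 outputs, 5-wide window, stride 1
        assert(requiredInputExtent(1, 2, 2) == 2);   // 1 output, 2-wide window, stride 2
        return 0;
    }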
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 4f0fb1ea2..5fbd1c052 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -119,55 +119,55 @@ public:
     }

-// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
-    //     if (outputIdx != 0) {
-    //         AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-    //     }
-    //     if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
-    //         // Offset
-    //         const auto outputIdxDims = mOutput->getCoord(firstIdx);
-    //         auto inputIdxDims = outputIdxDims; // batch idx is the same
-    //         inputIdxDims[1] = 0; // each channel is used so start with the first one
-
-    //         for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-    //             if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-    //                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
-    //             }
-    //         }
-
-    //         // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-    //         // Input
-    //         // same batch value, every input channel is used
-    //         std::vector<DimSize_t> inputDims{outputDims[0], mInputs[0]->dims()[1]};
-    //         for (DimIdx_t i = 0; i < DIM; ++i) {
-    //             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-    //                         * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
-    //                         + 1
-    //                         + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-    //                         * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-    //             inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
-    //         }
-
-    //         // Weight
-    //         // same output value, every input channel is used
-    //         std::vector<DimSize_t> weightDims{outputDims[0], mInputs[0]->dims()[1]};
-    //         weightDims.insert(weightDims.end(), this->template getAttr<ConvAttr::KernelDims>()[0], this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(DIM)]);
-    //         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
-    //         weightIdxDims[0] = outputIdxDims[1];
-
-    //         // Bias
-    //         const std::vector<DimSize_t> biasDims{outputDims[0]};
-    //         const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
-
-    //         // Result
-    //         std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
-    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
-    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[1]->getIdx(weightIdxDims), weightDims));
-    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[2]->getIdx(biasIdxDims), biasDims));
-    //         return res;
-    //     }
-    //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
-    // }
+    std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+        if (outputIdx != 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has only one output Tensor.");
+        }
+        if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+            // Offset
+            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
+            auto inputIdxDims = outputIdxDims; // batch idx is the same
+            inputIdxDims[1] = 0; // each channel is used so start with the first one
+
+            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+                }
+            }
+
+            // padding is not an attribute of Conv_Op; it is handled by the Pad_Op Operator
+            // Input
+            // same batch value, every input channel is used
+            std::vector<DimSize_t> inputDims{outputDims[0], mInputs[0]->dims()[1]};
+            for (DimIdx_t i = 0; i < DIM; ++i) {
+                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+                            * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                            + 1
+                            + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                            * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+                inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
+            }
+
+            // Weight
+            // same output channel value, every input channel is used
+            std::vector<DimSize_t> weightDims{outputDims[1], mInputs[0]->dims()[1]};
+            const auto& kernelDims = this->template getAttr<ConvAttr::KernelDims>();
+            weightDims.insert(weightDims.end(), kernelDims.begin(), kernelDims.end());
+            std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
+            weightIdxDims[0] = outputIdxDims[1];
+
+            // Bias
+            const std::vector<DimSize_t> biasDims{outputDims[1]};
+            const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
+
+            // Result
+            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[1]->getIdx(weightIdxDims), weightDims));
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[2]->getIdx(biasIdxDims), biasDims));
+            return res;
+        }
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    }

     void setBackend(const std::string &name) override {
         mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
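The Conv version extends the same arithmetic with dilation: along each spatial axis, in = (out - 1) * stride + 1 + (kernel - 1) * dilation. The weight field spans the requested output channels and every input channel; the bias field spans the requested output channels only. A standalone sketch of the spatial formula; plain C++, the helper name is illustrative and not Aidge API:

    #include <cassert>
    #include <cstddef>

    // Receptive-field extent of a convolution along one spatial axis.
    std::size_t convInputExtent(std::size_t out, std::size_t kernel,
                                std::size_t stride, std::size_t dilation) {
        return (out - 1) * stride + 1 + (kernel - 1) * dilation;
    }

    int main() {
        assert(convInputExtent(10, 5, 1, 1) == 14); // 5x5 conv: matches {16, 3, 14, 14} in the test below
        assert(convInputExtent(30, 3, 1, 1) == 32); // 3x3 conv: matches {4, 32, 32, 42} (30 -> 32, 40 -> 42)
        assert(convInputExtent(3, 3, 2, 2) == 9);   // stride 2 with dilation 2
        return 0;
    }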
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index b0f8435bd..1f4cdd23f 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -74,15 +74,6 @@ public:
     virtual std::shared_ptr<Operator> clone() const = 0;

     virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
-    /**
-     * @brief For a given output feature area, compute the associated receptive
-     * field for each data input.
-     * @param firstIdx First index of the output feature.
-     * @param outputDims Size of output feature.
-     * @param outputIdx Index of the output. Default 0.
-     * @return std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
-     */
-    // virtual std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;

     /**
      * @brief Set the specified input by performing a deep copy of the given data.
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index a55d7ac28..a490b09d0 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -90,6 +90,16 @@ public:
     ///////////////////////////////////////////////////

     // Tensor dimensions
+    /**
+     * @brief For a given output feature area, compute the associated receptive
+     * field for each data input.
+     * @param firstIdx First index of the output feature.
+     * @param outputDims Size of output feature.
+     * @param outputIdx Index of the output. Default 0.
+     * @return std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>
+     * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
+     */
+    virtual std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
     virtual void computeOutputDims();
     virtual bool outputDimsForwarded() const;
     ///////////////////////////////////////////////////
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index eb94db87d..4adc57f55 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -31,29 +31,6 @@ Aidge::Operator::~Operator() noexcept = default;
 //        IMPLEMENTATION
 ///////////////////////////////////////////////////////

-// std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>> Aidge::Operator::computeReceptiveField(
-//         const std::size_t firstIdx, const std::vector<Aidge::DimSize_t>& outputDims, const Aidge::IOIndex_t outputIdx) const
-// {
-//     static_cast<void>(outputIdx);
-//     if (outputIdx >= nbOutputs()) {
-//         AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator output index out of range.");
-//     }
-//     if (nbInputs() != nbDataInputs()) {
-//         AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. Must be handled in an overrided function.");
-//     }
-//     if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
-//         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
-//     }
-//     const auto outputIdxDims = getOutput(0)->getCoord(firstIdx);
-//     for (DimIdx_t i = 0; i < outputDims.size(); ++i) {
-//         if (((outputDims[i] + outputIdxDims[i]) > getOutput(0)->dims()[i]) || (outputDims[i] == 0)) {
-//             AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
-//         }
-//     }
-//     // return the same Tensor description as given in function parameter for each data input
-//     return std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>>(nbDataInputs(),std::pair<std::size_t, std::vector<Aidge::DimSize_t>>(firstIdx, outputDims));
-// }
-
 Aidge::NbElts_t Aidge::Operator::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     return mImpl->getNbRequiredData(inputIdx);
 }
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index 1d16e9064..1237fdc0b 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -88,6 +88,31 @@ const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aid
 }

+std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>> Aidge::OperatorTensor::computeReceptiveField(
+        const std::size_t firstIdx,
+        const std::vector<Aidge::DimSize_t>& outputDims,
+        const Aidge::IOIndex_t outputIdx) const
+{
+    if (outputIdx >= nbOutputs()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator output index out of range.");
+    }
+    if (nbInputs() != nbData()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. Must be handled in an overridden function.");
+    }
+    if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    }
+    const auto outputIdxDims = getOutput(0)->getCoord(firstIdx);
+    for (DimIdx_t i = 0; i < outputDims.size(); ++i) {
+        if (((outputDims[i] + outputIdxDims[i]) > getOutput(0)->dims()[i]) || (outputDims[i] == 0)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+        }
+    }
+    // return the same Tensor description as given in function parameter for each data input
+    return std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>>(nbData(), std::pair<std::size_t, std::vector<Aidge::DimSize_t>>(firstIdx, outputDims));
+}
+
 void Aidge::OperatorTensor::computeOutputDims() {
     // check inputs have been associated
     bool associated = (nbInputs() > 0); // do not compute anything if no input
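The default implementation above is an identity mapping: each data input is reported to need exactly the output region, which is only meaningful for operators whose data inputs share the output geometry (element-wise operators, for instance); anything with weight or bias inputs is expected to override it, hence the nbInputs() != nbData() guard. A minimal illustration of the returned value; plain C++, with Aidge types replaced by standard ones:

    #include <cstddef>
    #include <iostream>
    #include <utility>
    #include <vector>

    using DimSize_t = std::size_t;

    int main() {
        const std::size_t firstIdx = 42;    // flat index of the first output element
        const std::vector<DimSize_t> outputDims{1, 3, 8, 8};
        const std::size_t nbDataInputs = 2; // e.g. an element-wise Add

        // One identical (firstIdx, outputDims) pair per data input.
        const std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> field(
            nbDataInputs, std::make_pair(firstIdx, outputDims));

        std::cout << "inputs covered: " << field.size() << '\n'; // prints 2
        return 0;
    }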
diff --git a/unit_tests/operator/Test_Conv_Op.cpp b/unit_tests/operator/Test_Conv_Op.cpp
index ac667ec5a..1a543ae64 100644
--- a/unit_tests/operator/Test_Conv_Op.cpp
+++ b/unit_tests/operator/Test_Conv_Op.cpp
@@ -22,58 +22,63 @@
 #include "aidge/utils/Types.h"

 namespace Aidge {
-// TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeReceptiveField][Conv]") {
-//     auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
-//     auto conv1 = Conv(3, 32, {5, 5}, "conv1");          // output dims: {16, 32, 220, 220}
-//     auto conv2 = Conv(32, 64, {3, 3}, "conv2");         // output dims: {16, 64, 218, 218}
-//     auto conv3 = Conv(64, 10, {2, 2}, "conv3", {2,2});  // output dims: {16, 10, 109, 109}
-//     auto conv4 = Conv(10, 10, {1, 1}, "conv4");         // output dims: {16, 10, 109, 109}
+TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeReceptiveField][Conv]") {
+    auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
+    auto conv1 = Conv(3, 32, {5, 5}, "conv1");          // output dims: {16, 32, 220, 220}
+    auto conv2 = Conv(32, 64, {3, 3}, "conv2");         // output dims: {16, 64, 218, 218}
+    auto conv3 = Conv(64, 10, {2, 2}, "conv3", {2,2});  // output dims: {16, 10, 109, 109}
+    auto conv4 = Conv(10, 10, {1, 1}, "conv4");         // output dims: {16, 10, 109, 109}

-//     auto g = std::make_shared<GraphView>("TestGraph");
+    auto g = std::make_shared<GraphView>("TestGraph");

-//     dataProvider->addChild(conv1, 0);
-//     g->add(conv1);
-//     g->addChild(conv2, conv1, 0);
-//     g->addChild(conv3, conv2, 0);
-//     g->addChild(conv4, conv3, 0);
+    dataProvider->addChild(conv1, 0);
+    g->add(conv1);
+    g->addChild(conv2, conv1, 0);
+    g->addChild(conv3, conv2, 0);
+    g->addChild(conv4, conv3, 0);

-//     g->forwardDims();
+    g->forwardDims();

-//     SECTION("Check individual receptive fields") {
-//         auto res1 = conv1->getOperator()->computeReceptiveField(0, {16,32,10,10});
-//         auto res2 = conv2->getOperator()->computeReceptiveField(conv2->getOperator()->output(0).getIdx({3,20,100,28}), {4,20,30,40});
-//         auto res3 = conv3->getOperator()->computeReceptiveField(0, {1,1,109,109});
-//         auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->output(0).getIdx({5,0,108,108}), {10,10,1,1});
+    auto op1 = std::dynamic_pointer_cast<OperatorTensor>(conv1->getOperator());
+    auto op2 = std::dynamic_pointer_cast<OperatorTensor>(conv2->getOperator());
+    auto op3 = std::dynamic_pointer_cast<OperatorTensor>(conv3->getOperator());
+    auto op4 = std::dynamic_pointer_cast<OperatorTensor>(conv4->getOperator());

-//         REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
-//         REQUIRE(((res2[0].first == conv2->getOperator()->input(0).getIdx({3,0,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 32, 32, 42}))));
-//         REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 64, 218, 218}))));
-//         REQUIRE(((res4[0].first == conv4->getOperator()->input(0).getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 10, 1, 1}))));
-//     }
+    SECTION("Check individual receptive fields") {
+        auto res1 = op1->computeReceptiveField(0, {16,32,10,10});
+        auto res2 = op2->computeReceptiveField(op2->getOutput(0)->getIdx({3,20,100,28}), {4,20,30,40});
+        auto res3 = op3->computeReceptiveField(0, {1,1,109,109});
+        auto res4 = op4->computeReceptiveField(op4->getOutput(0)->getIdx({5,0,108,108}), {10,10,1,1});

-//     SECTION("Check receptive field propagation") {
-//         // input: first-{5, 0, 50, 50} dims-{1, 1, 1, 1}
-//         auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->output(0).getIdx({5,0,50,50}), {1,1,1,1});
-//         // conv4 RF: first-{5, 0, 50, 50} dims-{1, 10, 1, 1}
-//         auto res3 = conv3->getOperator()->computeReceptiveField(res4[0].first, res4[0].second);
-//         // conv3 RF: first-{5, 0, 100, 100} dims-{1, 64, 2, 2}
-//         auto res2 = conv2->getOperator()->computeReceptiveField(res3[0].first, res3[0].second);
-//         // conv2 RF: first-{5, 0, 100, 100} dims-{1, 32, 4, 4}
-//         auto res1 = conv1->getOperator()->computeReceptiveField(res2[0].first, res2[0].second);
-//         // conv1 RF: first-{5, 0, 100, 100} dims-{1, 3, 8, 8}
+        REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
+        REQUIRE(((res2[0].first == op2->getInput(0)->getIdx({3,0,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 32, 32, 42}))));
+        REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 64, 218, 218}))));
+        REQUIRE(((res4[0].first == op4->getInput(0)->getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 10, 1, 1}))));
+    }

-//         REQUIRE(((res1[0].first == conv1->getOperator()->input(0).getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 3, 8, 8}))));
+    SECTION("Check receptive field propagation") {
+        // input: first-{5, 0, 50, 50} dims-{1, 1, 1, 1}
+        auto res4 = op4->computeReceptiveField(op4->getOutput(0)->getIdx({5,0,50,50}), {1,1,1,1});
+        // conv4 RF: first-{5, 0, 50, 50} dims-{1, 10, 1, 1}
+        auto res3 = op3->computeReceptiveField(res4[0].first, res4[0].second);
+        // conv3 RF: first-{5, 0, 100, 100} dims-{1, 64, 2, 2}
+        auto res2 = op2->computeReceptiveField(res3[0].first, res3[0].second);
+        // conv2 RF: first-{5, 0, 100, 100} dims-{1, 32, 4, 4}
+        auto res1 = op1->computeReceptiveField(res2[0].first, res2[0].second);
+        // conv1 RF: first-{5, 0, 100, 100} dims-{1, 3, 8, 8}
+        REQUIRE(((res1[0].first == op1->getInput(0)->getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 3, 8, 8}))));

-//         // std::cout << "conv1: {";
-//         // std::cout << conv1->getOperator()->input(0).getCoord(res1[0].first)[0] << ", "
-//         //           << conv1->getOperator()->input(0).getCoord(res1[0].first)[1] << ", "
-//         //           << conv1->getOperator()->input(0).getCoord(res1[0].first)[2] << ", "
-//         //           << conv1->getOperator()->input(0).getCoord(res1[0].first)[3] << "} - {";
-//         // std::cout << res1[0].second[0] << ", "
-//         //           << res1[0].second[1] << ", "
-//         //           << res1[0].second[2] << ", "
-//         //           << res1[0].second[3] << "}" << std::endl;
-//     }
-// }
+
+        // std::cout << "conv1: {";
+        // std::cout << op1->getInput(0)->getCoord(res1[0].first)[0] << ", "
+        //           << op1->getInput(0)->getCoord(res1[0].first)[1] << ", "
+        //           << op1->getInput(0)->getCoord(res1[0].first)[2] << ", "
+        //           << op1->getInput(0)->getCoord(res1[0].first)[3] << "} - {";
+        // std::cout << res1[0].second[0] << ", "
+        //           << res1[0].second[1] << ", "
+        //           << res1[0].second[2] << ", "
+        //           << res1[0].second[3] << "}" << std::endl;
+    }
+}
 } // namespace Aidge
\ No newline at end of file
-- 
GitLab
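The propagation numbers asserted in the test can be re-derived by chaining the per-axis formula backwards from a single conv4 output pixel. A standalone cross-check; plain C++, not part of the patch or of the Aidge test suite:

    #include <cassert>
    #include <cstddef>

    // Same per-axis formula as in Conv_Op::computeReceptiveField().
    std::size_t convInputExtent(std::size_t out, std::size_t kernel,
                                std::size_t stride, std::size_t dilation) {
        return (out - 1) * stride + 1 + (kernel - 1) * dilation;
    }

    int main() {
        std::size_t e = 1;                               // one conv4 output pixel
        e = convInputExtent(e, 1, 1, 1); assert(e == 1); // conv4 (1x1):      dims {1, 10, 1, 1}
        e = convInputExtent(e, 2, 2, 1); assert(e == 2); // conv3 (2x2, s=2): dims {1, 64, 2, 2}
        e = convInputExtent(e, 3, 1, 1); assert(e == 4); // conv2 (3x3):      dims {1, 32, 4, 4}
        e = convInputExtent(e, 5, 1, 1); assert(e == 8); // conv1 (5x5):      dims {1, 3, 8, 8}

        std::size_t idx = 50;   // spatial start coordinate of the conv4 output
        idx *= 2;               // only conv3's stride-2 scales it: 50 -> 100
        assert(idx == 100);     // matches first-{5, 0, 100, 100} in the comments
        return 0;
    }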