Skip to content
Snippets Groups Projects
Commit c698971e authored by Maxence Naud
Browse files

[Tmp] also remove computeReceptiveField() from ConvDepthWise operator

parent b5a9890c
No related branches found
No related tags found
2 merge requests!46Remove Operator reference to Tensor,!20Draft: Introduction of Tiling
Pipeline #34291 passed
......@@ -124,41 +124,41 @@ class ConvDepthWise_Op : public Operator,
/// Returns true once the output Tensor's dimensions have been computed,
/// i.e. the output is no longer empty.
bool outputDimsForwarded() const override final {
    return !mOutput->empty();
}
std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
if (outputIdx != 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
}
if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
// Offset
const auto outputIdxDims = mOutput->getCoord(firstIdx);
auto inputIdxDims = outputIdxDims; // batch idx is the same
for (DimIdx_t i = 0; i < (DIM+2); ++i) {
if (((outputDims[i] + outputIdxDims[i]) > mOutput->dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
}
}
// padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
// Width
std::vector<DimSize_t> inputDims;
inputDims.push_back(outputDims[0]); // same batch value
inputDims.push_back(outputDims[1]); // same channel value
for (DimIdx_t i = 0; i < DIM; ++i) {
inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
* this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
+ 1
+ (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
* this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
}
std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>();
res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
return res;
}
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
}
// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
// if (outputIdx != 0) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
// }
// if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
// // Offset
// const auto outputIdxDims = mOutput->getCoord(firstIdx);
// auto inputIdxDims = outputIdxDims; // batch idx is the same
// for (DimIdx_t i = 0; i < (DIM+2); ++i) {
// if (((outputDims[i] + outputIdxDims[i]) > mOutput->dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
// }
// }
// // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
// // Width
// std::vector<DimSize_t> inputDims;
// inputDims.push_back(outputDims[0]); // same batch value
// inputDims.push_back(outputDims[1]); // same channel value
// for (DimIdx_t i = 0; i < DIM; ++i) {
// inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
// * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
// + 1
// + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
// * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
// inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
// }
// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>();
// res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
// return res;
// }
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
// }
inline Tensor& input(const IOIndex_t inputIdx) const override final {
assert(inputIdx < 3 && "operators supports only 3 inputs");
......
......@@ -22,47 +22,47 @@
#include "aidge/utils/Types.h"
namespace Aidge {
// NOTE(review): this diff view interleaves the previously-active test lines with
// the commented-out copy introduced by this commit ("[Tmp] also remove
// computeReceptiveField() from ConvDepthWise operator"). The active lines below
// still call computeReceptiveField(), which the same commit comments out in the
// operator header — confirm this whole TEST_CASE is meant to be disabled.
TEST_CASE("[core/operator] ConvDepthWise_Op(computeReceptiveField)", "[Operator][computeReceptiveFiled][ConvDepthWise]") {
// Build a small chain of depthwise convolutions over a {16, 3, 224, 224} input.
auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
auto conv1 = ConvDepthWise({5, 5}, "conv1"); // output dims: {16, 3, 220, 220}
auto conv2 = ConvDepthWise({3, 3}, "conv2"); // output dims: {16, 3, 218, 218}
auto conv3 = ConvDepthWise({2, 2}, "conv3", {2,2}); // output dims: {16, 3, 109, 109}
auto conv4 = ConvDepthWise({1, 1}, "conv4"); // output dims: {16, 3, 109, 109}
// TEST_CASE("[core/operator] ConvDepthWise_Op(computeReceptiveField)", "[Operator][computeReceptiveFiled][ConvDepthWise]") {
// auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
// auto conv1 = ConvDepthWise({5, 5}, "conv1"); // output dims: {16, 3, 220, 220}
// auto conv2 = ConvDepthWise({3, 3}, "conv2"); // output dims: {16, 3, 218, 218}
// auto conv3 = ConvDepthWise({2, 2}, "conv3", {2,2}); // output dims: {16, 3, 109, 109}
// auto conv4 = ConvDepthWise({1, 1}, "conv4"); // output dims: {16, 3, 109, 109}
auto g = std::make_shared<GraphView>("TestGraph");
// auto g = std::make_shared<GraphView>("TestGraph");
// Wire the chain dataProvider -> conv1 -> conv2 -> conv3 -> conv4 into the graph.
dataProvider->addChild(conv1, 0);
g->add(conv1);
g->addChild(conv2, conv1, 0);
g->addChild(conv3, conv2, 0);
g->addChild(conv4, conv3, 0);
// dataProvider->addChild(conv1, 0);
// g->add(conv1);
// g->addChild(conv2, conv1, 0);
// g->addChild(conv3, conv2, 0);
// g->addChild(conv4, conv3, 0);
// Propagate dims so computeReceptiveField() has forwarded output dims to work on.
g->forwardDims();
// g->forwardDims();
SECTION("Check individual receptive fields") {
auto res1 = conv1->getOperator()->computeReceptiveField(0, {16,3,10,10});
auto res2 = conv2->getOperator()->computeReceptiveField(conv2->getOperator()->output(0).getIdx({3,1,100,28}), {4,2,30,40});
auto res3 = conv3->getOperator()->computeReceptiveField(0, {1,1,109,109});
auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->input(0).getIdx({5,0,108,108}), {10,1,1,1});
// SECTION("Check individual receptive fields") {
// auto res1 = conv1->getOperator()->computeReceptiveField(0, {16,3,10,10});
// auto res2 = conv2->getOperator()->computeReceptiveField(conv2->getOperator()->output(0).getIdx({3,1,100,28}), {4,2,30,40});
// auto res3 = conv3->getOperator()->computeReceptiveField(0, {1,1,109,109});
// auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->input(0).getIdx({5,0,108,108}), {10,1,1,1});
REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
REQUIRE(((res2[0].first == conv2->getOperator()->input(0).getIdx({3,1,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 2, 32, 42}))));
REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 1, 218, 218}))));
REQUIRE(((res4[0].first == conv4->getOperator()->input(0).getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 1, 1, 1}))));
}
// REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
// REQUIRE(((res2[0].first == conv2->getOperator()->input(0).getIdx({3,1,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 2, 32, 42}))));
// REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 1, 218, 218}))));
// REQUIRE(((res4[0].first == conv4->getOperator()->input(0).getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 1, 1, 1}))));
// }
// Walk a single output element's receptive field backwards through the chain.
SECTION("Check receptive field propagation") {
// input: first-{5, 0, 50, 50} dims-{1, 1, 1, 1}
auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->input(0).getIdx({5,0,50,50}), {1,1,1,1});
// conv4 RF: first-{5, 0, 50, 50} dims-{1, 1, 1, 1}
auto res3 = conv3->getOperator()->computeReceptiveField(res4[0].first, res4[0].second);
// conv3 RF: first-{5, 0, 100, 100} dims-{1, 1, 2, 2}
auto res2 = conv2->getOperator()->computeReceptiveField(res3[0].first, res3[0].second);
// conv2 RF: first-{5, 0, 100, 100} dims-{1, 1, 4, 4}
auto res1 = conv1->getOperator()->computeReceptiveField(res2[0].first, res2[0].second);
// conv1 RF: first-{5, 0, 100, 100} dims-{1, 1, 8, 8}
// SECTION("Check receptive field propagation") {
// // input: first-{5, 0, 50, 50} dims-{1, 1, 1, 1}
// auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->input(0).getIdx({5,0,50,50}), {1,1,1,1});
// // conv4 RF: first-{5, 0, 50, 50} dims-{1, 1, 1, 1}
// auto res3 = conv3->getOperator()->computeReceptiveField(res4[0].first, res4[0].second);
// // conv3 RF: first-{5, 0, 100, 100} dims-{1, 1, 2, 2}
// auto res2 = conv2->getOperator()->computeReceptiveField(res3[0].first, res3[0].second);
// // conv2 RF: first-{5, 0, 100, 100} dims-{1, 1, 4, 4}
// auto res1 = conv1->getOperator()->computeReceptiveField(res2[0].first, res2[0].second);
// // conv1 RF: first-{5, 0, 100, 100} dims-{1, 1, 8, 8}
REQUIRE(((res1[0].first == conv1->getOperator()->input(0).getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 1, 8, 8}))));
}
}
// REQUIRE(((res1[0].first == conv1->getOperator()->input(0).getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 1, 8, 8}))));
// }
// }
} // namespace Aidge
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment