Commit 47967e1f authored by Maxence Naud

[Upd] 'computeReceptiveField()' member function to take dimensions as inputs instead of flatId

parent 0cf389b5
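The change touches the receptive-field computation of the AvgPooling, Conv and ConvDepthWise operators, the OperatorTensor base class, and the corresponding unit test: callers now pass the coordinates of the first output element instead of its flattened index, and each returned pair carries input coordinates instead of a flat index. A minimal sketch of the call-site change, using values from the updated unit test below (op is assumed to be a std::shared_ptr<OperatorTensor> wrapping a 2-D Conv node):

// Before this commit: a flat index into the output Tensor had to be computed first
// auto res = op->computeReceptiveField(op->getOutput(0)->getIdx({3, 20, 100, 28}), {4, 20, 30, 40});
// After this commit: pass the coordinates of the first output element directly
auto res = op->computeReceptiveField({3, 20, 100, 28}, {4, 20, 30, 40});
const std::vector<DimSize_t>& firstInputEltDims = res[0].first;  // coordinates of the receptive field's first element in data input 0
const std::vector<DimSize_t>& inputDims = res[0].second;         // dimensions of the receptive field in data input 0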
@@ -94,22 +94,24 @@ public:
     }
-    std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>
-    computeReceptiveField(const std::size_t firstIdx,
+    std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
+    computeReceptiveField(const std::vector<DimSize_t>& firstEltDims,
                           const std::vector<DimSize_t>& outputDims,
                           const IOIndex_t outputIdx = 0) const override final
     {
         if (outputIdx != 0) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
         }
+        if (firstEltDims.size() != outputDims.size()) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
+        }
         if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
             // Offset
-            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
-            std::vector<DimSize_t> inputIdxDims = outputIdxDims;
+            std::vector<DimSize_t> inputIdxDims = firstEltDims;
             for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+                if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
                 }
             }
@@ -126,8 +128,8 @@ public:
                                            + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
                 inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
             }
-            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
+            std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
             return res;
         }
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
@@ -119,19 +119,21 @@ public:
     }
-    std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+    std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
         if (outputIdx != 0) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
         }
+        if (firstEltDims.size() != outputDims.size()) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
+        }
         if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
             // Offset
-            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
-            auto inputIdxDims = outputIdxDims; // batch idx is the same
+            auto inputIdxDims = firstEltDims; // batch idx is the same
             inputIdxDims[1] = 0; // each channel is used so start with the first one
             for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+                if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
                 }
             }
@@ -162,10 +164,10 @@ std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveFiel
             const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
             // Result
-            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(0)->getIdx(inputIdxDims), inputDims));
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(1)->getIdx(weightIdxDims), weightDims));
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(2)->getIdx(biasIdxDims), biasDims));
+            std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
             return res;
         }
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
@@ -115,18 +115,20 @@ public:
         }
     }
-    std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+    std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
         if (outputIdx != 0) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
         }
+        if (firstEltDims.size() != outputDims.size()) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "outputDims and firstEltDims should have the size of the output Tensor dimensions.");
+        }
         if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
             // Offset
-            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
-            auto inputIdxDims = outputIdxDims; // batch idx is the same
+            auto inputIdxDims = firstEltDims; // batch idx is the same
             for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+                if (((outputDims[i] + firstEltDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
                 }
             }
@@ -156,10 +158,10 @@ public:
             const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
             // Result
-            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(0)->getIdx(inputIdxDims), inputDims));
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(1)->getIdx(weightIdxDims), weightDims));
-            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(2)->getIdx(biasIdxDims), biasDims));
+            std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> res;
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(inputIdxDims, inputDims));
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(weightIdxDims, weightDims));
+            res.push_back(std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>(biasIdxDims, biasDims));
             return res;
         }
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
@@ -100,7 +100,7 @@ public:
      * @return std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>
      * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
      */
-    virtual std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
+    virtual std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
     virtual void computeOutputDims();
     virtual bool outputDimsForwarded() const;
     ///////////////////////////////////////////////////
@@ -88,8 +88,8 @@ const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aid
 }
-std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>> Aidge::OperatorTensor::computeReceptiveField(
-        const std::size_t firstIdx,
+std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>> Aidge::OperatorTensor::computeReceptiveField(
+        const std::vector<DimSize_t>& firstEltDims,
         const std::vector<Aidge::DimSize_t>& outputDims,
         const Aidge::IOIndex_t outputIdx) const
 {
@@ -103,14 +103,13 @@ std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>> Aidge::Operat
     if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
     }
-    const auto outputIdxDims = getOutput(0)->getCoord(firstIdx);
     for (DimIdx_t i = 0; i < outputDims.size(); ++i) {
-        if (((outputDims[i] + outputIdxDims[i]) > getOutput(0)->dims()[i]) || (outputDims[i] == 0)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+        if (((outputDims[i] + firstEltDims[i]) > getOutput(0)->dims()[i]) || (outputDims[i] == 0)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), firstEltDims[i], outputDims[i]);
         }
     }
     // return the same Tensor description as given in function parameter for each data input
-    return std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>>(nbData(),std::pair<std::size_t, std::vector<Aidge::DimSize_t>>(firstIdx, outputDims));
+    return std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>(nbData(),std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>(firstEltDims, outputDims));
 }
 void Aidge::OperatorTensor::computeOutputDims() {
@@ -45,10 +45,10 @@ TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeR
     auto op4 = std::dynamic_pointer_cast<OperatorTensor>(conv4 -> getOperator());
     SECTION("Check individual receptive fields") {
-        auto res1 = op1 -> computeReceptiveField(0, {16,32,10,10});
-        auto res2 = op2 -> computeReceptiveField(op2 -> getOutput(0)->getIdx({3,20,100,28}), {4,20,30,40});
-        auto res3 = op3 -> computeReceptiveField(0, {1,1,109,109});
-        auto res4 = op4 -> computeReceptiveField(op4 -> getOutput(0)->getIdx({5,0,108,108}), {10,10,1,1});
+        auto res1 = op1 -> computeReceptiveField({0,0,0,0}, {16,32,10,10});
+        auto res2 = op2 -> computeReceptiveField({3,20,100,28}, {4,20,30,40});
+        auto res3 = op3 -> computeReceptiveField({0,0,0,0}, {1,1,109,109});
+        auto res4 = op4 -> computeReceptiveField({5,0,108,108}, {10,10,1,1});
         REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
         REQUIRE(((res1[1].first == 0) && (res1[1].second == std::vector<DimSize_t>({32, 3, 5, 5}))));
@@ -60,7 +60,7 @@ TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeR
     SECTION("Check receptive field propagation") {
         // input: first-{5, 0, 50, 50} dims-{1, 1, 1, 1}
-        auto res4 = op4->computeReceptiveField(op4->getOutput(0)->getIdx({5,0,50,50}), {1,1,1,1});
+        auto res4 = op4->computeReceptiveField({5,0,50,50}, {1,1,1,1});
         // conv4 RF: first-{5, 0, 50, 50} dims-{1, 10, 1, 1}
         auto res3 = op3->computeReceptiveField(res4[0].first, res4[0].second);
         // conv3 RF: first-{5, 0, 100, 100} dims-{1, 64, 2, 2}
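Because coordinates now flow both in and out of computeReceptiveField(), a receptive field can be propagated backwards through a chain of operators without converting to and from flat indices at each step, as the propagation test above does with res4[0].first and res4[0].second. A minimal sketch of that pattern, assuming ops holds std::shared_ptr<OperatorTensor> nodes ordered from output to input (the container name is illustrative):

std::vector<DimSize_t> firstEltDims = {5, 0, 50, 50};  // coordinates of the first output element of interest
std::vector<DimSize_t> dims = {1, 1, 1, 1};            // extent of the output area of interest
for (const auto& op : ops) {
    const auto rf = op->computeReceptiveField(firstEltDims, dims);
    firstEltDims = rf[0].first;   // receptive field of data input 0 feeds the next operator upstream
    dims = rf[0].second;
}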