Commit dd21faee authored by Maxence Naud

[WIP] uncomment 'computeReceptiveField()' member function

parent 9c52d407
@@ -94,40 +94,44 @@ public:
}
// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
// if (outputIdx != 0) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
// }
// if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
// // Offset
// const auto outputIdxDims = mOutput->getCoord(firstIdx);
// std::vector<DimSize_t> inputIdxDims = outputIdxDims;
// for (DimIdx_t i = 0; i < (DIM+2); ++i) {
// if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
// }
// }
// // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
// // Width
// std::vector<DimSize_t> inputDims;
// inputDims.push_back(outputDims[0]); // same batch value
// inputDims.push_back(outputDims[1]); // same channel value
// for (DimIdx_t i = 0; i < DIM; ++i) {
// inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
// * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
// + 1
// + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
// inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
// }
// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>();
// res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInput->getIdx(inputIdxDims), inputDims));
// return res;
// }
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
// }
std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>
computeReceptiveField(const std::size_t firstIdx,
const std::vector<DimSize_t>& outputDims,
const IOIndex_t outputIdx = 0) const override final
{
if (outputIdx != 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "AvgPooling_Op Operator has only one output Tensor.");
}
if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
// Offset
const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
std::vector<DimSize_t> inputIdxDims = outputIdxDims;
for (DimIdx_t i = 0; i < (DIM+2); ++i) {
if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
}
}
// Padding is not an attribute of AvgPooling_Op; it is handled by the Pad_Op Operator
// Spatial dimensions
std::vector<DimSize_t> inputDims;
inputDims.push_back(outputDims[0]); // same batch value
inputDims.push_back(outputDims[1]); // same channel value
for (DimIdx_t i = 0; i < DIM; ++i) {
inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
* this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
+ 1
+ (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
}
std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
return res;
}
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
}
void setBackend(const std::string &name) override {
......
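In the AvgPooling hunk above, the loop implements the classic pooling relation: an output extent o along one spatial axis requires (o - 1) * stride + 1 + (kernel - 1) input elements, which simplifies to (o - 1) * stride + kernel, and the first input coordinate is the first output coordinate times the stride. A minimal standalone sketch of that arithmetic (hypothetical helper name, independent of the Aidge types):

#include <cstddef>
#include <vector>

// Input extent needed per spatial axis: (o - 1) * stride + kernel.
std::vector<std::size_t> poolingInputExtent(const std::vector<std::size_t>& outExtent,
                                            const std::vector<std::size_t>& stride,
                                            const std::vector<std::size_t>& kernel) {
    std::vector<std::size_t> inExtent;
    for (std::size_t i = 0; i < outExtent.size(); ++i) {
        inExtent.push_back((outExtent[i] - 1) * stride[i] + kernel[i]);
    }
    return inExtent;
}

// Example: a 10x10 output patch of a 2x2 kernel with stride 2 maps back to a
// (10 - 1) * 2 + 2 = 20x20 input patch.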
@@ -119,55 +119,55 @@ public:
}
// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
// if (outputIdx != 0) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
// }
// if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
// // Offset
// const auto outputIdxDims = mOutput->getCoord(firstIdx);
// auto inputIdxDims = outputIdxDims; // batch idx is the same
// inputIdxDims[1] = 0; // each channel is used so start with the first one
// for (DimIdx_t i = 0; i < (DIM+2); ++i) {
// if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
// }
// }
// // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
// // Input
// // same batch value, every input channel is used
// std::vector<DimSize_t> inputDims{outputDims[0], mInputs[0]->dims()[1]};
// for (DimIdx_t i = 0; i < DIM; ++i) {
// inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
// * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
// + 1
// + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
// * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
// inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
// }
// // Weight
// // same output value, every input channel is used
// std::vector<DimSize_t> weightDims{outputDims[0], mInputs[0]->dims()[1]};
// weightDims.insert(weightDims.end(), this->template getAttr<ConvAttr::KernelDims>()[0], this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(DIM)]);
// std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
// weightIdxDims[0] = outputIdxDims[1];
// // Bias
// const std::vector<DimSize_t> biasDims{outputDims[0]};
// const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
// // Result
// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
// res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
// res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[1]->getIdx(weightIdxDims), weightDims));
// res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[2]->getIdx(biasIdxDims), biasDims));
// return res;
// }
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
// }
std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
if (outputIdx != 0) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has only one output Tensor.");
}
if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
// Offset
const auto outputIdxDims = mOutput->getCoord(firstIdx);
auto inputIdxDims = outputIdxDims; // batch idx is the same
inputIdxDims[1] = 0; // each channel is used so start with the first one
for (DimIdx_t i = 0; i < (DIM+2); ++i) {
if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
}
}
// Padding is not an attribute of Conv_Op; it is handled by the Pad_Op Operator
// Input
// same batch value, every input channel is used
std::vector<DimSize_t> inputDims{outputDims[0], mInputs[0]->dims()[1]};
for (DimIdx_t i = 0; i < DIM; ++i) {
inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
* this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+ 1
+ (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
* this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
}
// Weight
// first dim is the requested output-channel extent, every input channel is used
std::vector<DimSize_t> weightDims{outputDims[1], mInputs[0]->dims()[1]};
for (DimIdx_t i = 0; i < DIM; ++i) {
weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)]);
}
std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
weightIdxDims[0] = outputIdxDims[1];
// Bias
const std::vector<DimSize_t> biasDims{outputDims[1]}; // one value per requested output channel
const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
// Result
std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[1]->getIdx(weightIdxDims), weightDims));
res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[2]->getIdx(biasIdxDims), biasDims));
return res;
}
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
}
void setBackend(const std::string &name) override {
mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
......
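The Conv hunk extends the same relation with dilation: each spatial axis requires (o - 1) * stride + 1 + (kernel - 1) * dilation input elements, exactly the expression in the loop above. A self-contained sketch of the formula (illustrative name, not part of the Aidge API):

#include <cstddef>

// Input extent required along one spatial axis of a dilated convolution.
std::size_t convInputExtent(std::size_t o, std::size_t stride,
                            std::size_t kernel, std::size_t dilation) {
    return (o - 1) * stride + 1 + (kernel - 1) * dilation;
}

// Example: o = 4, stride = 1, kernel = 3, dilation = 2
// gives (4 - 1) * 1 + 1 + (3 - 1) * 2 = 8 input elements.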
@@ -74,15 +74,6 @@ public:
virtual std::shared_ptr<Operator> clone() const = 0;
virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
/**
* @brief For a given output feature area, compute the associated receptive
* field for each data input.
* @param firstIdx First index of the output feature.
* @param outputDims Size of output feature.
* @param outputIdx Index of the output. Default 0.
* @return std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
*/
// virtual std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
/**
* @brief Set the specified input by performing a deep copy of the given data.
......
@@ -90,6 +90,16 @@ public:
///////////////////////////////////////////////////
// Tensor dimensions
/**
* @brief For a given output feature area, compute the associated receptive
* field for each data input.
* @param firstIdx First index of the output feature.
* @param outputDims Size of output feature.
* @param outputIdx Index of the output. Default 0.
* @return std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>
* For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
*/
virtual std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
virtual void computeOutputDims();
virtual bool outputDimsForwarded() const;
///////////////////////////////////////////////////
......
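A usage sketch for the declaration above, assuming op is a std::shared_ptr<OperatorTensor> whose output dims have already been forwarded (graph setup omitted):

// Region of each data input feeding the 3x3 output patch whose first
// element sits at coordinates {0, 0, 10, 10} of output 0.
const std::size_t first = op->getOutput(0)->getIdx({0, 0, 10, 10});
const auto fields = op->computeReceptiveField(first, {1, 1, 3, 3});
// fields[i] holds the first flat index and the dims of the area required
// from data input i.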
@@ -31,29 +31,6 @@ Aidge::Operator::~Operator() noexcept = default;
// IMPLEMENTATION
///////////////////////////////////////////////////////
// std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>> Aidge::Operator::computeReceptiveField(
// const std::size_t firstIdx, const std::vector<Aidge::DimSize_t>& outputDims, const Aidge::IOIndex_t outputIdx) const
// {
// static_cast<void>(outputIdx);
// if (outputIdx >= nbOutputs()) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator output index out of range.");
// }
// if (nbInputs() != nbDataInputs()) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. Must be handled in an overrided function.");
// }
// if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
// }
// const auto outputIdxDims = getOutput(0)->getCoord(firstIdx);
// for (DimIdx_t i = 0; i < outputDims.size(); ++i) {
// if (((outputDims[i] + outputIdxDims[i]) > getOutput(0)->dims()[i]) || (outputDims[i] == 0)) {
// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
// }
// }
// // return the same Tensor description as given in function parameter for each data input
// return std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>>(nbDataInputs(),std::pair<std::size_t, std::vector<Aidge::DimSize_t>>(firstIdx, outputDims));
// }
Aidge::NbElts_t Aidge::Operator::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
return mImpl->getNbRequiredData(inputIdx);
}
......
@@ -88,6 +88,31 @@ const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aid
}
std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>> Aidge::OperatorTensor::computeReceptiveField(
const std::size_t firstIdx,
const std::vector<Aidge::DimSize_t>& outputDims,
const Aidge::IOIndex_t outputIdx) const
{
if (outputIdx >= nbOutputs()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator output index out of range.");
}
if (nbInputs() != nbData()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. Must be handled in an overridden function.");
}
if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
}
const auto outputIdxDims = getOutput(0)->getCoord(firstIdx);
for (DimIdx_t i = 0; i < outputDims.size(); ++i) {
if (((outputDims[i] + outputIdxDims[i]) > getOutput(0)->dims()[i]) || (outputDims[i] == 0)) {
AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
}
}
// return the same Tensor description as given in function parameter for each data input
return std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>>(nbData(), std::pair<std::size_t, std::vector<Aidge::DimSize_t>>(firstIdx, outputDims));
}
void Aidge::OperatorTensor::computeOutputDims() {
// check inputs have been associated
bool associated = (nbInputs() > 0); // do not compute anything if no input
......
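For an operator whose inputs are all data inputs (nbInputs() == nbData()), this default is an identity mapping: every data input must supply exactly the region requested at the output. A sketch, assuming addOp is a forwarded element-wise operator with two data inputs:

const auto fields = addOp->computeReceptiveField(0, {1, 3, 4, 4});
// fields.size() == 2 and both entries are {0, {1, 3, 4, 4}}.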
@@ -22,58 +22,63 @@
#include "aidge/utils/Types.h"
namespace Aidge {
// TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeReceptiveField][Conv]") {
// auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
// auto conv1 = Conv(3, 32, {5, 5}, "conv1"); // output dims: {16, 32, 220, 220}
// auto conv2 = Conv(32, 64, {3, 3}, "conv2"); // output dims: {16, 64, 218, 218}
// auto conv3 = Conv(64, 10, {2, 2}, "conv3", {2,2}); // output dims: {16, 10, 109, 109}
// auto conv4 = Conv(10, 10, {1, 1}, "conv4"); // output dims: {16, 10, 109, 109}
TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeReceptiveField][Conv]") {
auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
auto conv1 = Conv(3, 32, {5, 5}, "conv1"); // output dims: {16, 32, 220, 220}
auto conv2 = Conv(32, 64, {3, 3}, "conv2"); // output dims: {16, 64, 218, 218}
auto conv3 = Conv(64, 10, {2, 2}, "conv3", {2,2}); // output dims: {16, 10, 109, 109}
auto conv4 = Conv(10, 10, {1, 1}, "conv4"); // output dims: {16, 10, 109, 109}
// auto g = std::make_shared<GraphView>("TestGraph");
auto g = std::make_shared<GraphView>("TestGraph");
// dataProvider->addChild(conv1, 0);
// g->add(conv1);
// g->addChild(conv2, conv1, 0);
// g->addChild(conv3, conv2, 0);
// g->addChild(conv4, conv3, 0);
dataProvider->addChild(conv1, 0);
g->add(conv1);
g->addChild(conv2, conv1, 0);
g->addChild(conv3, conv2, 0);
g->addChild(conv4, conv3, 0);
// g->forwardDims();
g->forwardDims();
// SECTION("Check individual receptive fields") {
// auto res1 = conv1->getOperator()->computeReceptiveField(0, {16,32,10,10});
// auto res2 = conv2->getOperator()->computeReceptiveField(conv2->getOperator()->output(0).getIdx({3,20,100,28}), {4,20,30,40});
// auto res3 = conv3->getOperator()->computeReceptiveField(0, {1,1,109,109});
// auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->output(0).getIdx({5,0,108,108}), {10,10,1,1});
auto op1 = std::dynamic_pointer_cast<OperatorTensor>(conv1->getOperator());
auto op2 = std::dynamic_pointer_cast<OperatorTensor>(conv2->getOperator());
auto op3 = std::dynamic_pointer_cast<OperatorTensor>(conv3->getOperator());
auto op4 = std::dynamic_pointer_cast<OperatorTensor>(conv4->getOperator());
// REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
// REQUIRE(((res2[0].first == conv2->getOperator()->input(0).getIdx({3,0,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 32, 32, 42}))));
// REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 64, 218, 218}))));
// REQUIRE(((res4[0].first == conv4->getOperator()->input(0).getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 10, 1, 1}))));
// }
SECTION("Check individual receptive fields") {
auto res1 = op1->computeReceptiveField(0, {16,32,10,10});
auto res2 = op2->computeReceptiveField(op2->getOutput(0)->getIdx({3,20,100,28}), {4,20,30,40});
auto res3 = op3->computeReceptiveField(0, {1,1,109,109});
auto res4 = op4->computeReceptiveField(op4->getOutput(0)->getIdx({5,0,108,108}), {10,10,1,1});
// SECTION("Check receptive field propagation") {
// // input: first-{5, 0, 50, 50} dims-{1, 1, 1, 1}
// auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->output(0).getIdx({5,0,50,50}), {1,1,1,1});
// // conv4 RF: first-{5, 0, 50, 50} dims-{1, 10, 1, 1}
// auto res3 = conv3->getOperator()->computeReceptiveField(res4[0].first, res4[0].second);
// // conv3 RF: first-{5, 0, 100, 100} dims-{1, 64, 2, 2}
// auto res2 = conv2->getOperator()->computeReceptiveField(res3[0].first, res3[0].second);
// // conv2 RF: first-{5, 0, 100, 100} dims-{1, 32, 4, 4}
// auto res1 = conv1->getOperator()->computeReceptiveField(res2[0].first, res2[0].second);
// // conv1 RF: first-{5, 0, 100, 100} dims-{1, 3, 8, 8}
REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14}))));
REQUIRE(((res2[0].first == op2->input(0).getIdx({3,0,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 32, 32, 42}))));
REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 64, 218, 218}))));
REQUIRE(((res4[0].first == op4->input(0).getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 10, 1, 1}))));
}
// REQUIRE(((res1[0].first == conv1->getOperator()->input(0).getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 3, 8, 8}))));
SECTION("Check receptive field propagation") {
// input: first-{5, 0, 50, 50} dims-{1, 1, 1, 1}
auto res4 = op4->computeReceptiveField(op4->getOutput(0)->getIdx({5,0,50,50}), {1,1,1,1});
// conv4 RF: first-{5, 0, 50, 50} dims-{1, 10, 1, 1}
auto res3 = op3->computeReceptiveField(res4[0].first, res4[0].second);
// conv3 RF: first-{5, 0, 100, 100} dims-{1, 64, 2, 2}
auto res2 = op2->computeReceptiveField(res3[0].first, res3[0].second);
// conv2 RF: first-{5, 0, 100, 100} dims-{1, 32, 4, 4}
auto res1 = op1->computeReceptiveField(res2[0].first, res2[0].second);
// conv1 RF: first-{5, 0, 100, 100} dims-{1, 3, 8, 8}
REQUIRE(((res1[0].first == op1->input(0).getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 3, 8, 8}))));
// // std::cout << "conv1: {";
// // std::cout << conv1->getOperator()->input(0).getCoord(res1[0].first)[0] << ", "
// // << conv1->getOperator()->input(0).getCoord(res1[0].first)[1] << ", "
// // << conv1->getOperator()->input(0).getCoord(res1[0].first)[2] << ", "
// // << conv1->getOperator()->input(0).getCoord(res1[0].first)[3] << "} - {";
// // std::cout << res1[0].second[0] << ", "
// // << res1[0].second[1] << ", "
// // << res1[0].second[2] << ", "
// // << res1[0].second[3] << "}" << std::endl;
// }
// }
// std::cout << "conv1: {";
// std::cout << op1->input(0).getCoord(res1[0].first)[0] << ", "
// << op1->input(0).getCoord(res1[0].first)[1] << ", "
// << op1->input(0).getCoord(res1[0].first)[2] << ", "
// << op1->input(0).getCoord(res1[0].first)[3] << "} - {";
// std::cout << res1[0].second[0] << ", "
// << res1[0].second[1] << ", "
// << res1[0].second[2] << ", "
// << res1[0].second[3] << "}" << std::endl;
}
}
} // namespace Aidge
\ No newline at end of file
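The expected values in the second SECTION follow from applying (o - 1) * stride + 1 + (kernel - 1) * dilation backwards through the chain (a hand check, not part of the test file; all dilations are 1):

// conv4 (1x1, stride 1): (1 - 1) * 1 + 1 + 0 = 1 -> spatial dims stay {1, 1}
// conv3 (2x2, stride 2): (1 - 1) * 2 + 1 + 1 = 2 -> {2, 2}; coordinate 50 * 2 = 100
// conv2 (3x3, stride 1): (2 - 1) * 1 + 1 + 2 = 4 -> {4, 4}
// conv1 (5x5, stride 1): (4 - 1) * 1 + 1 + 4 = 8 -> {8, 8} at input coordinate {5, 0, 100, 100}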