Commit 79cd2ca3 authored by Maxence Naud

Merge branch 'tiling' into 'main'

horizontal tiling

See merge request eclipse/aidge/aidge_core!54
parents 14150413 bdceee73
Showing changed files with 180 additions and 151 deletions
@@ -12,15 +12,17 @@ namespace Aidge{
 /**
  * @brief this class uses the lexer to create an AST according to a set of grammar rules
  */
-class GraphParser{
+class GraphParser {
 public:
     /**
      * @brief AST graph creation function
      * @param gRegexExpressions String representing the logical function to be performed
      */
     GraphParser(const std::string gRegexExpressions);
+
+    ~GraphParser() noexcept;

     /**
      * @brief AST graph creation function
      * @return The AST tree
@@ -35,7 +37,7 @@ class GraphParser{
     const std::string getQuery();

 private:
     /**
      * @brief restart at the start of the ConditionalExpressions for LEXER and restart mCurrentToken
      */
...
@@ -29,7 +29,7 @@ using ASTNodeCh = std::vector<std::shared_ptr<AstNode<ConditionalTokenTypes>>>;
 /**
  * @brief this class uses the lexer to create an AST according to a set of grammar rules
  */
-class ConditionalParser{
+class ConditionalParser {
 public:
     /**
@@ -38,6 +38,8 @@ class ConditionalParser{
      */
     ConditionalParser(const std::string ConditionalExpressions);
+
+    ~ConditionalParser() noexcept;

     /**
      * @brief AST graph creation function
      * @return The AST tree
...
@@ -47,7 +47,7 @@ public:
     Add_Op(const Add_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Add_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Add_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
...
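The same one-line fix recurs in nearly every operator header below: the copy's own output Tensors are now re-created empty by the OperatorTensor copy constructor (see the OperatorTensor hunk further down), so the backend used to instantiate the implementation must be read from the source operator op. A minimal sketch of the corrected pattern, with MyOp as a stand-in name (illustrative, not part of the commit):

    // Copy-constructor pattern after this commit: the backend is taken from the
    // *source* operator's output, because the copy's own outputs are still empty.
    MyOp(const MyOp& op)
        : OperatorTensor(op)
    {
        mImpl = op.mImpl
            ? Registrar<MyOp>::create(op.mOutputs[0]->getImpl()->backend())(*this)
            : nullptr; // the source had no implementation, so neither does the copy
    }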
@@ -60,7 +60,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
@@ -94,40 +94,44 @@ public:
     }

-    // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
-    //     if (outputIdx != 0) {
-    //         AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-    //     }
-    //     if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
-    //         // Offset
-    //         const auto outputIdxDims = mOutput->getCoord(firstIdx);
-    //         std::vector<DimSize_t> inputIdxDims = outputIdxDims;
-
-    //         for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-    //             if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-    //                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
-    //             }
-    //         }
-
-    //         // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-    //         // Width
-    //         std::vector<DimSize_t> inputDims;
-    //         inputDims.push_back(outputDims[0]); // same batch value
-    //         inputDims.push_back(outputDims[1]); // same channel value
-
-    //         for (DimIdx_t i = 0; i < DIM; ++i) {
-    //             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-    //                         * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
-    //                         + 1
-    //                         + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
-    //             inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
-    //         }
-    //         std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>();
-    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInput->getIdx(inputIdxDims), inputDims));
-    //         return res;
-    //     }
-    //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
-    // }
+    std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>
+    computeReceptiveField(const std::size_t firstIdx,
+                          const std::vector<DimSize_t>& outputDims,
+                          const IOIndex_t outputIdx = 0) const override final
+    {
+        if (outputIdx != 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
+        }
+        if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+            // Offset
+            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
+            std::vector<DimSize_t> inputIdxDims = outputIdxDims;
+
+            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+                }
+            }

+            // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
+            // Width
+            std::vector<DimSize_t> inputDims;
+            inputDims.push_back(outputDims[0]); // same batch value
+            inputDims.push_back(outputDims[1]); // same channel value

+            for (DimIdx_t i = 0; i < DIM; ++i) {
+                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+                            * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                            + 1
+                            + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1));
+                inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)];
+            }
+            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
+            return res;
+        }
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    }

     void setBackend(const std::string &name) override {
...
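As a quick sanity check of the extent formula used above (with assumed values, not taken from the commit): for one spatial dimension with kernel 3 and stride 2, a block of 4 consecutive output values needs (4 - 1) * 2 + 1 + (3 - 1) = 9 input samples, i.e. 4 windows of size 3 placed at offsets 0, 2, 4 and 6.

    // Illustrative check of the receptive-field extent formula (arbitrary values).
    #include <cassert>
    #include <cstddef>

    int main() {
        const std::size_t outExtent = 4, stride = 2, kernel = 3;
        const std::size_t inExtent = (outExtent - 1) * stride + 1 + (kernel - 1);
        assert(inExtent == 9); // windows at offsets 0, 2, 4, 6 cover samples 0..8
        return 0;
    }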
@@ -54,7 +54,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
...
@@ -55,7 +55,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Concat_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Concat_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
...
@@ -65,7 +65,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
@@ -77,9 +77,9 @@ public:
     }

     // Data operator[](const char* inputName) override final {
-    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
-    //         (strcmp(inputName, "weight") ? mInputs[1] :
-    //         (strcmp(inputName, "bias") ? mInputs[2] :
+    //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? getInput(0) :
+    //         (strcmp(inputName, "weight") ? getInput(1) :
+    //         (strcmp(inputName, "bias") ? getInput(2) :
     //         nullptr));
     //     assert((in!=nullptr) && "No such parameter");
     //     return *in;
@@ -119,55 +119,57 @@ public:
     }

-    // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
-    //     if (outputIdx != 0) {
-    //         AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-    //     }
-    //     if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
-    //         // Offset
-    //         const auto outputIdxDims = mOutput->getCoord(firstIdx);
-    //         auto inputIdxDims = outputIdxDims; // batch idx is the same
-    //         inputIdxDims[1] = 0; // each channel is used so start with the first one
-
-    //         for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-    //             if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-    //                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
-    //             }
-    //         }
-
-    //         // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-    //         // Input
-    //         // same batch value, every input channel is used
-    //         std::vector<DimSize_t> inputDims{outputDims[0], mInputs[0]->dims()[1]};
-    //         for (DimIdx_t i = 0; i < DIM; ++i) {
-    //             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-    //                         * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
-    //                         + 1
-    //                         + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-    //                         * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-    //             inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
-    //         }
-
-    //         // Weight
-    //         // same output value, every input channel is used
-    //         std::vector<DimSize_t> weightDims{outputDims[0], mInputs[0]->dims()[1]};
-    //         weightDims.insert(weightDims.end(), this->template getAttr<ConvAttr::KernelDims>()[0], this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(DIM)]);
-    //         std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
-    //         weightIdxDims[0] = outputIdxDims[1];
-
-    //         // Bias
-    //         const std::vector<DimSize_t> biasDims{outputDims[0]};
-    //         const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};
-
-    //         // Result
-    //         std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
-    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
-    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[1]->getIdx(weightIdxDims), weightDims));
-    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[2]->getIdx(biasIdxDims), biasDims));
-    //         return res;
-    //     }
-    //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
-    // }
+    std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+        if (outputIdx != 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
+        }
+        if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+            // Offset
+            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
+            auto inputIdxDims = outputIdxDims; // batch idx is the same
+            inputIdxDims[1] = 0; // each channel is used so start with the first one

+            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+                }
+            }

+            // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
+            // Input
+            // same batch value, every input channel is used
+            std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]};
+            for (DimIdx_t i = 0; i < DIM; ++i) {
+                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+                            * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                            + 1
+                            + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                            * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+                inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)];
+            }

+            // Weight
+            // same output value, every input channel is used
+            std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]};
+            for (std::size_t i = 0; i < DIM; ++i) {
+                weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]);
+            }
+            std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
+            weightIdxDims[0] = outputIdxDims[1];

+            // Bias
+            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
+            const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};

+            // Result
+            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(0)->getIdx(inputIdxDims), inputDims));
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(1)->getIdx(weightIdxDims), weightDims));
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(2)->getIdx(biasIdxDims), biasDims));
+            return res;
+        }
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    }

     void setBackend(const std::string &name) override {
         mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
...
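For context, the horizontal tiling this merge request targets would call computeReceptiveField on an operator whose dimensions have already been forwarded, describing the slice of the output it wants to produce and getting back, for each data input (data, weight, bias in the Conv case), the first index and dimensions of the region that must be read. A hypothetical caller-side sketch, assuming a 4-D NCHW convolution held in a pointer named conv with valid tensors (all variable names are illustrative, not part of the commit):

    // Receptive field of the left half of the output along the width axis.
    const auto& outDims = conv->getOutput(0)->dims();                  // {N, C, H, W}
    const std::vector<DimSize_t> tileDims{outDims[0], outDims[1],
                                          outDims[2], outDims[3] / 2}; // left half of W
    std::vector<DimSize_t> origin(4, 0);                               // tile starts at the output origin
    const std::size_t firstIdx = conv->getOutput(0)->getIdx(origin);
    // One (first index, dimensions) pair per data input: data, weight, bias.
    const auto fields = conv->computeReceptiveField(firstIdx, tileDims);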
@@ -67,7 +67,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
@@ -115,41 +115,55 @@ public:
         }
     }

-    // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
-    //     if (outputIdx != 0) {
-    //         AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
-    //     }
-    //     if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
-    //         // Offset
-    //         const auto outputIdxDims = mOutput->getCoord(firstIdx);
-    //         auto inputIdxDims = outputIdxDims; // batch idx is the same
-
-    //         for (DimIdx_t i = 0; i < (DIM+2); ++i) {
-    //             if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
-    //                 AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
-    //             }
-    //         }
-
-    //         // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
-    //         // Width
-    //         std::vector<DimSize_t> inputDims;
-    //         inputDims.push_back(outputDims[0]); // same batch value
-    //         inputDims.push_back(outputDims[1]); // same channel value
-
-    //         for (DimIdx_t i = 0; i < DIM; ++i) {
-    //             inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
-    //                         * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
-    //                         + 1
-    //                         + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
-    //                         * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
-    //             inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
-    //         }
-    //         std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>();
-    //         res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims));
-    //         return res;
-    //     }
-    //     AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
-    // }
+    std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
+        if (outputIdx != 0) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor.");
+        }
+        if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) {
+            // Offset
+            const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx);
+            auto inputIdxDims = outputIdxDims; // batch idx is the same

+            for (DimIdx_t i = 0; i < (DIM+2); ++i) {
+                if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) {
+                    AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]);
+                }
+            }

+            // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator
+            // Input
+            // same batch value
+            std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]};
+            for (DimIdx_t i = 0; i < DIM; ++i) {
+                inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1)
+                            * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]
+                            + 1
+                            + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)
+                            * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]);
+                inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)];
+            }

+            // Weight
+            std::vector<DimSize_t> weightDims{outputDims[1], 1};
+            for (std::size_t i = 0; i < DIM; ++i) {
+                weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]);
+            }
+            std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0);
+            weightIdxDims[0] = outputIdxDims[1];

+            // Bias
+            const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel
+            const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]};

+            // Result
+            std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res;
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(0)->getIdx(inputIdxDims), inputDims));
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(1)->getIdx(weightIdxDims), weightDims));
+            res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(2)->getIdx(biasIdxDims), biasDims));
+            return res;
+        }
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet.");
+    }

     void setBackend(const std::string &name) override {
         mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
...
@@ -40,7 +40,7 @@ public:
     Div_Op(const Div_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Div_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
...
@@ -57,7 +57,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<FC_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
...
@@ -54,7 +54,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
...
@@ -56,7 +56,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
...
@@ -64,7 +64,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
...
@@ -43,7 +43,7 @@ public:
     Mul_Op(const Mul_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Mul_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Mul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
...
@@ -74,15 +74,6 @@ public:
     virtual std::shared_ptr<Operator> clone() const = 0;

     virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
-    /**
-     * @brief For a given output feature area, compute the associated receptive
-     * field for each data input.
-     * @param firstIdx First index of the output feature.
-     * @param outputDims Size of output feature.
-     * @param outputIdx Index of the output. Default 0.
-     * @return std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
-     */
-    // virtual std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;

     /**
      * @brief Set the specified input by performing a deep copy of the given data.
...
@@ -56,7 +56,8 @@ public:
           mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)),
           mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) {
         for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
-            mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i)));
+            mOutputs[i] = std::make_shared<Tensor>();
+            // mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i)));
             // datatype already copied
         }
     }
@@ -90,6 +91,16 @@ public:
     ///////////////////////////////////////////////////
     // Tensor dimensions

+    /**
+     * @brief For a given output feature area, compute the associated receptive
+     * field for each data input.
+     * @param firstIdx First index of the output feature.
+     * @param outputDims Size of output feature.
+     * @param outputIdx Index of the output. Default 0.
+     * @return std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>
+     * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
+     */
+    virtual std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
     virtual void computeOutputDims();
     virtual bool outputDimsForwarded() const;
     ///////////////////////////////////////////////////
...
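Interpreting the returned pairs, per the documented contract above (a sketch only, not code from this commit): entry i gives the flat index of the first element and the per-dimension extent of the region of data input i that the requested output area depends on.

    // res[i].first  : flat index of the region's first element in data input i
    // res[i].second : extent of that region along every dimension of input i
    const auto res = op->computeReceptiveField(firstIdx, outputDims);
    for (std::size_t i = 0; i < res.size(); ++i) {
        const auto start = op->getInput(i)->getCoord(res[i].first); // coordinates of the region origin
        const auto& size = res[i].second;                           // region extent, per dimension
        // ... copy or view the slice [start, start + size) of input i to build the tile
    }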
@@ -40,7 +40,7 @@ public:
     Pow_Op(const Pow_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Pow_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Pow_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
...
@@ -51,7 +51,10 @@ public:
     Producer_Op(const Producer_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
+            mOutputs[i] = std::make_shared<Tensor>(*(op.getOutput(i)));
+        }
+        mImpl = op.mImpl ? Registrar<Producer_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
...
@@ -39,7 +39,7 @@ public:
     ReLU_Op(const ReLU_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
...
@@ -55,7 +55,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Scaling_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Scaling_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }

     /**
...