diff --git a/include/aidge/graphRegex/GraphParser.hpp b/include/aidge/graphRegex/GraphParser.hpp
index cfe25c22709a3516b4f55ba774a616e3b94a055c..2c25ac0b76368242891e6e5ba92c2c5fc913a23c 100644
--- a/include/aidge/graphRegex/GraphParser.hpp
+++ b/include/aidge/graphRegex/GraphParser.hpp
@@ -12,15 +12,17 @@ namespace Aidge{
 /**
  * @brief this class uses the lexer to create an AST according to a set of gramer rules
  */
-class GraphParser{
+class GraphParser {
 
-    public:
+public:
     /**
     * @brief AST graph creation function
     * @param gRegexExpressions String representing the logical fuction to be performed
     */
    GraphParser(const std::string gRegexExpressions);
 
+    ~GraphParser() noexcept;
+
    /**
     * @brief AST graph creation function
     * @return The AST tree
@@ -35,7 +37,7 @@ class GraphParser{
 
    const std::string getQuery();
 
-    private:
+private:
     /**
     * @brief restart at the start of the ConditionalExpressions for LEXER and restart mCurrentToken
     */
diff --git a/include/aidge/nodeTester/ConditionalParser.hpp b/include/aidge/nodeTester/ConditionalParser.hpp
index c21eca0407b77808287138fd39e33c00d241fb70..1f3671ea5b68008a67be5d6a63d09051d49939d5 100644
--- a/include/aidge/nodeTester/ConditionalParser.hpp
+++ b/include/aidge/nodeTester/ConditionalParser.hpp
@@ -29,7 +29,7 @@ using ASTNodeCh = std::vector<std::shared_ptr<AstNode<ConditionalTokenTypes>>>;
 /**
  * @brief this class uses the lexer to create an AST according to a set of gramer rules
  */
-class ConditionalParser{
+class ConditionalParser {
 
 public:
     /**
@@ -38,6 +38,8 @@ class ConditionalParser{
     */
     ConditionalParser(const std::string ConditionalExpressions);
 
+    ~ConditionalParser() noexcept;
+
     /**
     * @brief AST graph creation function
     * @return The AST tree
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 0c285402929ab7b071d732180891de1b738dc4a8..f5521a1d12728a7957cb67c09861ee673e21cbae 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -47,7 +47,7 @@ public:
     Add_Op(const Add_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Add_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Add_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index f0f9f6c54ed1953ed31b713ce19edc7a8e594d4a..5fb1d5b16c55f7f5b6cea4db02d3aa955831e08b 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -60,7 +60,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ?
Registrar<AvgPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; } /** @@ -94,40 +94,44 @@ public: } - // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override { - // if (outputIdx != 0) { - // AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor."); - // } - // if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) { - // // Offset - // const auto outputIdxDims = mOutput->getCoord(firstIdx); - // std::vector<DimSize_t> inputIdxDims = outputIdxDims; - - // for (DimIdx_t i = 0; i < (DIM+2); ++i) { - // if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) { - // AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]); - // } - // } - - // // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator - // // Width - // std::vector<DimSize_t> inputDims; - // inputDims.push_back(outputDims[0]); // same batch value - // inputDims.push_back(outputDims[1]); // same channel value - - // for (DimIdx_t i = 0; i < DIM; ++i) { - // inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1) - // * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)] - // + 1 - // + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)); - // inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]; - // } - // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>(); - // res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInput->getIdx(inputIdxDims), inputDims)); - // return res; - // } - // AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); - // } + std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> + computeReceptiveField(const std::size_t firstIdx, + const std::vector<DimSize_t>& outputDims, + const IOIndex_t outputIdx = 0) const override final + { + if (outputIdx != 0) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor."); + } + if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) { + // Offset + const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx); + std::vector<DimSize_t> inputIdxDims = outputIdxDims; + + for (DimIdx_t i = 0; i < (DIM+2); ++i) { + if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]); + } + } + + // padding is not a parameter of Conv_Op. 
It is handled in Pad_Op Operator + // Width + std::vector<DimSize_t> inputDims; + inputDims.push_back(outputDims[0]); // same batch value + inputDims.push_back(outputDims[1]); // same channel value + + for (DimIdx_t i = 0; i < DIM; ++i) { + inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1) + * this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)] + + 1 + + (this->template getAttr<AvgPoolingAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1)); + inputIdxDims[2+i] *= this->template getAttr<AvgPoolingAttr::StrideDims>()[static_cast<std::size_t>(i)]; + } + std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res; + res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims)); + return res; + } + AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); + } void setBackend(const std::string &name) override { diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp index 09a9bb9efac81431673ef3449f717fbcb9af5108..be850d377e5a1781b2cb04b5040c257ecc30cd92 100644 --- a/include/aidge/operator/BatchNorm.hpp +++ b/include/aidge/operator/BatchNorm.hpp @@ -54,7 +54,7 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; } /** diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp index 01d590aa7425cb62ab665c0078019a6c8ab4a66a..78e21f85250c361053857e27c582e1487aeec64e 100644 --- a/include/aidge/operator/Concat.hpp +++ b/include/aidge/operator/Concat.hpp @@ -55,7 +55,7 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? Registrar<Concat_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + mImpl = op.mImpl ? Registrar<Concat_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; } /** diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp index 4f0fb1ea2717c1fdf4443c450000ec3a56bb9b5b..b62d393bc37859f24c4f54f8ce1ba4458bf11ab4 100644 --- a/include/aidge/operator/Conv.hpp +++ b/include/aidge/operator/Conv.hpp @@ -65,7 +65,7 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; } /** @@ -77,9 +77,9 @@ public: } // Data operator[](const char* inputName) override final { - // std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] : - // (strcmp(inputName, "weight") ? mInputs[1] : - // (strcmp(inputName, "bias") ? mInputs[2] : + // std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? getInput(0) : + // (strcmp(inputName, "weight") ? getInput(1) : + // (strcmp(inputName, "bias") ? 
getInput(2) : // nullptr)); // assert((in!=nullptr) && "No such parameter"); // return *in; @@ -119,55 +119,57 @@ public: } -// std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override { - // if (outputIdx != 0) { - // AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor."); - // } - // if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) { - // // Offset - // const auto outputIdxDims = mOutput->getCoord(firstIdx); - // auto inputIdxDims = outputIdxDims; // batch idx is the same - // inputIdxDims[1] = 0; // each channel is used so start with the first one - - // for (DimIdx_t i = 0; i < (DIM+2); ++i) { - // if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) { - // AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]); - // } - // } - - // // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator - // // Input - // // same batch value, every input channel is used - // std::vector<DimSize_t> inputDims{outputDims[0], mInputs[0]->dims()[1]}; - // for (DimIdx_t i = 0; i < DIM; ++i) { - // inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1) - // * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)] - // + 1 - // + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1) - // * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]); - // inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]; - // } - - // // Weight - // // same output value, every input channel is used - // std::vector<DimSize_t> weightDims{outputDims[0], mInputs[0]->dims()[1]}; - // weightDims.insert(weightDims.end(), this->template getAttr<ConvAttr::KernelDims>()[0], this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(DIM)]); - // std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0); - // weightIdxDims[0] = outputIdxDims[1]; - - // // Bias - // const std::vector<DimSize_t> biasDims{outputDims[0]}; - // const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]}; - - // // Result - // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res; - // res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims)); - // res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[1]->getIdx(weightIdxDims), weightDims)); - // res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[2]->getIdx(biasIdxDims), biasDims)); - // return res; - // } - // AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); - // } +std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override { + if (outputIdx != 0) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor."); + } + if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) { + // Offset + const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx); + auto inputIdxDims = outputIdxDims; // batch idx is the same + inputIdxDims[1] = 0; // each channel is used so start with the 
first one + + for (DimIdx_t i = 0; i < (DIM+2); ++i) { + if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]); + } + } + + // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator + // Input + // same batch value, every input channel is used + std::vector<DimSize_t> inputDims{outputDims[0], getInput(0)->dims()[1]}; + for (DimIdx_t i = 0; i < DIM; ++i) { + inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1) + * this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)] + + 1 + + (this->template getAttr<ConvAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1) + * this->template getAttr<ConvAttr::DilationDims>()[static_cast<std::size_t>(i)]); + inputIdxDims[2+i] *= this->template getAttr<ConvAttr::StrideDims>()[static_cast<std::size_t>(i)]; + } + + // Weight + // same output value, every input channel is used + std::vector<DimSize_t> weightDims{outputDims[1], getInput(0)->dims()[1]}; + for (std::size_t i = 0; i < DIM; ++i) { + weightDims.push_back(this->template getAttr<ConvAttr::KernelDims>()[i]); + } + std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0); + weightIdxDims[0] = outputIdxDims[1]; + + // Bias + const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel + const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]}; + + // Result + std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res; + res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(0)->getIdx(inputIdxDims), inputDims)); + res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(1)->getIdx(weightIdxDims), weightDims)); + res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(2)->getIdx(biasIdxDims), biasDims)); + return res; + } + AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); + } void setBackend(const std::string &name) override { mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this); diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp index ca6401e0ed3ac888f12858853f0d8f494c226041..c95315f6d63e817354fc82dded4e3cfb4ed1b704 100644 --- a/include/aidge/operator/ConvDepthWise.hpp +++ b/include/aidge/operator/ConvDepthWise.hpp @@ -67,7 +67,7 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + mImpl = op.mImpl ? 
Registrar<ConvDepthWise_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; } /** @@ -115,41 +115,55 @@ public: } } - // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override { - // if (outputIdx != 0) { - // AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor."); - // } - // if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) { - // // Offset - // const auto outputIdxDims = mOutput->getCoord(firstIdx); - // auto inputIdxDims = outputIdxDims; // batch idx is the same - - // for (DimIdx_t i = 0; i < (DIM+2); ++i) { - // if (((outputDims[i] + outputIdxDims[i]) > mOutput->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) { - // AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]); - // } - // } - - // // padding is not a parameter of Conv_Op. It is handled in Pad_Op Operator - // // Width - // std::vector<DimSize_t> inputDims; - // inputDims.push_back(outputDims[0]); // same batch value - // inputDims.push_back(outputDims[1]); // same channel value - - // for (DimIdx_t i = 0; i < DIM; ++i) { - // inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1) - // * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)] - // + 1 - // + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1) - // * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]); - // inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]; - // } - // std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res = std::vector<std::pair<std::size_t, std::vector<DimSize_t>>>(); - // res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(mInputs[0]->getIdx(inputIdxDims), inputDims)); - // return res; - // } - // AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); - // } + std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override { + if (outputIdx != 0) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Conv_Op Operator has got only one output Tensor."); + } + if ((outputDims.size() == (DIM+2)) && outputDimsForwarded()) { + // Offset + const auto outputIdxDims = mOutputs[0]->getCoord(firstIdx); + auto inputIdxDims = outputIdxDims; // batch idx is the same + + for (DimIdx_t i = 0; i < (DIM+2); ++i) { + if (((outputDims[i] + outputIdxDims[i]) > mOutputs[0]->template dims<DIM+2>()[i]) || (outputDims[i] == 0)) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]); + } + } + + // padding is not a parameter of Conv_Op. 
It is handled in Pad_Op Operator + // Input + // same batch value + std::vector<DimSize_t> inputDims{outputDims[0], outputDims[1]}; + for (DimIdx_t i = 0; i < DIM; ++i) { + inputDims.push_back((outputDims[2+static_cast<std::size_t>(i)] - 1) + * this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)] + + 1 + + (this->template getAttr<ConvDepthWiseAttr::KernelDims>()[static_cast<std::size_t>(i)] - 1) + * this->template getAttr<ConvDepthWiseAttr::DilationDims>()[static_cast<std::size_t>(i)]); + inputIdxDims[2+i] *= this->template getAttr<ConvDepthWiseAttr::StrideDims>()[static_cast<std::size_t>(i)]; + } + + // Weight + std::vector<DimSize_t> weightDims{outputDims[1], 1}; + for (std::size_t i = 0; i < DIM; ++i) { + weightDims.push_back(this->template getAttr<ConvDepthWiseAttr::KernelDims>()[i]); + } + std::vector<DimSize_t> weightIdxDims = std::vector<DimSize_t>(DIM+2, 0); + weightIdxDims[0] = outputIdxDims[1]; + + // Bias + const std::vector<DimSize_t> biasDims{outputDims[1]}; // the number of output channel + const std::vector<DimSize_t> biasIdxDims{outputIdxDims[1]}; + + // Result + std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> res; + res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(0)->getIdx(inputIdxDims), inputDims)); + res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(1)->getIdx(weightIdxDims), weightDims)); + res.push_back(std::pair<std::size_t, std::vector<DimSize_t>>(getInput(2)->getIdx(biasIdxDims), biasDims)); + return res; + } + AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); + } void setBackend(const std::string &name) override { mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this); diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp index ba76c0bdecfaf86644a3336a1076064b96b36046..fcdb03a6be36bc9e1be7d69d01005f92b535d00c 100644 --- a/include/aidge/operator/Div.hpp +++ b/include/aidge/operator/Div.hpp @@ -40,7 +40,7 @@ public: Div_Op(const Div_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? Registrar<Div_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + mImpl = op.mImpl ? Registrar<Div_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; } /** diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp index 4cece292cb322c0a58f96380eb0f0083771d3c19..8dea38335dd052f2dbf7d0aa7fc4f7fe84741a06 100644 --- a/include/aidge/operator/FC.hpp +++ b/include/aidge/operator/FC.hpp @@ -57,7 +57,7 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + mImpl = op.mImpl ? Registrar<FC_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; } /** diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp index 800c8c61d876b6f33cce1af3365179b7eb14b68d..2474e2e5af4139b77cace03b27b603fb66b7699a 100644 --- a/include/aidge/operator/LeakyReLU.hpp +++ b/include/aidge/operator/LeakyReLU.hpp @@ -54,7 +54,7 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + mImpl = op.mImpl ? 
Registrar<LeakyReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 23c12d45802e25f29891c48164acfb2d3ad137ac..90930dd22a36f84a7479e245eb09d9c28dfd031d 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -56,7 +56,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<MatMul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index ad50a27a94a2217c94445fb556c84ec7f121c6b9..c46ddb3797e2303ee27814c96ef060156bdc9108 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -64,7 +64,7 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 5b9ab4eb8c3924133f32ddfeb0a5f05963381771..337fe6e65cc040e67ee033516731a7ba8de86d2d 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -43,7 +43,7 @@ public:
     Mul_Op(const Mul_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Mul_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        mImpl = op.mImpl ? Registrar<Mul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index b0f8435bd0126cf3fba9f956a432017585a4d873..1f4cdd23f9a765924305ebeb43e3e6ee1ad73496 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -74,15 +74,6 @@ public:
     virtual std::shared_ptr<Operator> clone() const = 0;
 
     virtual void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) = 0;
-    /**
-     * @brief For a given output feature area, compute the associated receptive
-     * field for each data input.
-     * @param firstIdx First index of the output feature.
-     * @param outputDims Size of output feature.
-     * @param outputIdx Index of the output. Default 0.
-     * @return std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
-     */
-    // virtual std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
 
     /**
      * @brief Set the specified input by performing a deep copy of the given data.
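(Note: the computeReceptiveField() declaration removed from Operator.hpp above reappears as a virtual member of OperatorTensor in the next file. A minimal usage sketch, based on the ConvDepthWise unit test at the end of this patch and assuming the Aidge headers and factory helpers shown elsewhere in the diff:)

#include <memory>

#include "aidge/graph/GraphView.hpp"
#include "aidge/operator/ConvDepthWise.hpp"
#include "aidge/operator/OperatorTensor.hpp"
#include "aidge/operator/Producer.hpp"

using namespace Aidge;

int main() {
    // 16x3x224x224 input feeding a 5x5 depthwise convolution (output dims: {16, 3, 220, 220})
    auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider");
    auto cdw1 = ConvDepthWise(3, {5, 5}, "cdw1");
    auto g = std::make_shared<GraphView>("TestGraph");
    dataProvider->addChild(cdw1, 0);
    g->add(cdw1);
    g->forwardDims();   // output dims must be forwarded before computing receptive fields

    // Receptive field of the first {16, 3, 10, 10} output patch:
    // res[0].first  is the flattened index of the first required input element,
    // res[0].second the dimensions of the required input patch ({16, 3, 14, 14} for a 5x5 kernel).
    auto op1 = std::dynamic_pointer_cast<OperatorTensor>(cdw1->getOperator());
    auto res = op1->computeReceptiveField(0, {16, 3, 10, 10});
    return 0;
}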
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp index a55d7ac2842f948d923f9e1e54d2ffed1fd0f954..126e5d467d0f341a8c5b8c5d16d188ebe92135d0 100644 --- a/include/aidge/operator/OperatorTensor.hpp +++ b/include/aidge/operator/OperatorTensor.hpp @@ -56,7 +56,8 @@ public: mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)), mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) { for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) { - mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i))); + mOutputs[i] = std::make_shared<Tensor>(); + // mOutputs[i] = std::make_shared<Tensor>(*(other.getOutput(i))); // datatype already copied } } @@ -90,6 +91,16 @@ public: /////////////////////////////////////////////////// // Tensor dimensions + /** + * @brief For a given output feature area, compute the associated receptive + * field for each data input. + * @param firstIdx First index of the output feature. + * @param outputDims Size of output feature. + * @param outputIdx Index of the output. Default 0. + * @return std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> + * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area. + */ + virtual std::vector<std::pair<std::size_t, std::vector<DimSize_t>>> computeReceptiveField(const std::size_t firstIdx, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const; virtual void computeOutputDims(); virtual bool outputDimsForwarded() const; /////////////////////////////////////////////////// diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp index 0b0ae82f012eace8b5a2d5eb362a359386495b79..a5cd3a9b047f9a32665cc2de1ead4f2221fed4aa 100644 --- a/include/aidge/operator/Pow.hpp +++ b/include/aidge/operator/Pow.hpp @@ -40,7 +40,7 @@ public: Pow_Op(const Pow_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? Registrar<Pow_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + mImpl = op.mImpl ? Registrar<Pow_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; } /** diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp index fb6a20403adc1ee5cddb5869fd9d39ef59fb776e..a3f6e085ce3849c1b057f0fdb043093b338b48a1 100644 --- a/include/aidge/operator/Producer.hpp +++ b/include/aidge/operator/Producer.hpp @@ -51,7 +51,10 @@ public: Producer_Op(const Producer_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) { + mOutputs[i] = std::make_shared<Tensor>(*(op.getOutput(i))); + } + mImpl = op.mImpl ? Registrar<Producer_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; } /** diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp index 3444c25fc2e1572e78a1377b3273580f494ac8f9..15dec9be8516f71f5f4dfd0aec6a2985671da53d 100644 --- a/include/aidge/operator/ReLU.hpp +++ b/include/aidge/operator/ReLU.hpp @@ -39,7 +39,7 @@ public: ReLU_Op(const ReLU_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? Registrar<ReLU_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + mImpl = op.mImpl ? 
Registrar<ReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; } /** diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp index fd6d6bcfccc36829671538e1f2e31b13644e3938..98e082ac27f7cdf90d5d0464d811f116ae9f59ae 100644 --- a/include/aidge/operator/Scaling.hpp +++ b/include/aidge/operator/Scaling.hpp @@ -55,7 +55,7 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? Registrar<Scaling_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + mImpl = op.mImpl ? Registrar<Scaling_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; } /** diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp index 7bdbd8099ab79c9f1714989dc41cfc0893427bc9..b92c1818d49b53d4a2eda9a8d2704a06ca2980ca 100644 --- a/include/aidge/operator/Slice.hpp +++ b/include/aidge/operator/Slice.hpp @@ -26,21 +26,20 @@ namespace Aidge { enum class SliceAttr { Beginning, SliceDims }; -template <DimIdx_t DIM> class Slice_Op : public OperatorTensor, - public Registrable<Slice_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op<DIM> &)>, - public StaticAttributes<SliceAttr, std::size_t, std::array<DimSize_t, DIM>> { + public Registrable<Slice_Op, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op &)>, + public StaticAttributes<SliceAttr, std::size_t, std::vector<DimSize_t>> { public: static constexpr const char *Type = "Slice"; Slice_Op() = delete; - using Attributes_ = StaticAttributes<SliceAttr, std::size_t, std::array<DimSize_t, DIM>>; + using Attributes_ = StaticAttributes<SliceAttr, std::size_t, std::vector<DimSize_t>>; template <SliceAttr e> using attr = typename Attributes_::template attr<e>; - Slice_Op(std::size_t beginningPos, std::array<DimSize_t, DIM> sliceDims) + Slice_Op(const std::size_t beginningPos, const std::vector<DimSize_t> sliceDims) : OperatorTensor(Type, 1, 0, 1), Attributes_(attr<SliceAttr::Beginning>(beginningPos), attr<SliceAttr::SliceDims>(sliceDims)) @@ -55,7 +54,7 @@ public: : OperatorTensor(op), Attributes_(op) { - mImpl = op.mImpl ? Registrar<Slice_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) + mImpl = op.mImpl ? 
Registrar<Slice_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; } @@ -70,12 +69,8 @@ public: if (!getInput(0) || (getInput(0)->empty())) { AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor"); } - // Check input dimensions is compatible with slice dimensions - if (getInput(0)->nbDims() != DIM) { - AIDGE_THROW_OR_ABORT(std::runtime_error, "Error: input and slice dimensions are not the same size."); - } - std::array<DimSize_t, DIM> outputDims; - const std::array<DimSize_t, DIM> inputDims = getInput(0)->template dims<DIM>(); + std::vector<DimSize_t> outputDims = std::vector<DimSize_t>(getInput(0)->nbDims()); + const std::vector<DimSize_t> inputDims = getInput(0)->dims(); // Check that the sliced Tensor is actually part of the input Tensor // For a 5*5 tensor ('x') and a 3*3 slice kernel ('o'): @@ -85,7 +80,7 @@ public: // xxooo xxxoo // xxooo xxxoo std::vector<std::size_t> beginningCoords = mInputs[0]->getCoord(this->template getAttr<SliceAttr::Beginning>()); - for (std::size_t i = 0; i < DIM; ++i) { + for (std::size_t i = 0; i < getInput(0)->nbDims(); ++i) { if (beginningCoords[i] + this->template getAttr<SliceAttr::SliceDims>()[i] > inputDims[i]) { AIDGE_THROW_OR_ABORT(std::runtime_error, "ROI of Slice operator out of bounds"); } else { @@ -95,7 +90,7 @@ public: mOutputs[0]->resize(outputDims); } - void setBackend(const std::string &name) { + void setBackend(const std::string &name) override { mImpl = Registrar<Slice_Op>::create(name)(*this); mOutputs[0]->setBackend(name); @@ -111,16 +106,11 @@ public: } }; -template <std::size_t DIM> -inline std::shared_ptr<Node> Slice(std::size_t beginningPos, std::array<DimSize_t, DIM> sliceDims, + +inline std::shared_ptr<Node> Slice(const std::size_t beginningPos, const std::vector<DimSize_t> sliceDims, const std::string &name = "") { // FIXME: properly handle default w&b initialization in every cases - return std::make_shared<Node>(std::make_shared<Slice_Op<DIM>>( beginningPos, sliceDims), name); -} - -template <DimIdx_t DIM> -inline std::shared_ptr<Node> Slice(std::size_t beginningPos, DimSize_t const (&sliceDims)[DIM], const std::string& name = "") { - return Slice(beginningPos, to_array(sliceDims), name); + return std::make_shared<Node>(std::make_shared<Slice_Op>(beginningPos, sliceDims), name); } } // namespace Aidge diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp index cc19cb8210af516f349de124f65cdd55308609fb..d5c91945e83469dc9c6fef2b5adef026790b568d 100644 --- a/include/aidge/operator/Softmax.hpp +++ b/include/aidge/operator/Softmax.hpp @@ -40,7 +40,7 @@ public: Softmax_Op(const Softmax_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? Registrar<Softmax_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + mImpl = op.mImpl ? Registrar<Softmax_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; } /** diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp index a4069b59bbe7e7586d02b71a39d811d9bf972b77..1fe609fc2913afcda735ba2859126188aad4de5f 100644 --- a/include/aidge/operator/Sqrt.hpp +++ b/include/aidge/operator/Sqrt.hpp @@ -45,7 +45,7 @@ public: Sqrt_Op(const Sqrt_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? Registrar<Sqrt_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + mImpl = op.mImpl ? 
Registrar<Sqrt_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; } /** diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp index becf98926d2da777c6551e8ed2fbd7b5fcf50017..d141ad42015838e89e6d59c22bcefe56e795170c 100644 --- a/include/aidge/operator/Sub.hpp +++ b/include/aidge/operator/Sub.hpp @@ -45,7 +45,7 @@ public: Sub_Op(const Sub_Op& op) : OperatorTensor(op) { - mImpl = op.mImpl ? Registrar<Sub_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr; + mImpl = op.mImpl ? Registrar<Sub_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr; } /** diff --git a/include/aidge/recipies/Recipies.hpp b/include/aidge/recipies/Recipies.hpp index 26f4cc9da35832324b58ab1142278d0844b40707..a17ead8f8f5fa5106c375050ef5b82e6f149535a 100644 --- a/include/aidge/recipies/Recipies.hpp +++ b/include/aidge/recipies/Recipies.hpp @@ -84,7 +84,7 @@ void fuseBatchNorm(std::shared_ptr<MatchSolution> solution); */ void fuseBatchNorm(std::shared_ptr<GraphView> graphView); -// std::set<std::shared_ptr<Node>> getHorizontalTiling(const std::shared_ptr<Node>& node, const DimIdx_t axis, const std::size_t nbSlices); +std::set<std::shared_ptr<Node>> getConvHorizontalTiling(const std::shared_ptr<Node>& node, const DimIdx_t axis, const std::size_t nbSlices); // void horizontalTiling(std::shared_ptr<Node> node, DimIdx_t dim, std::size_t nbSlices); // std::set<std::shared_ptr<Node>> getHorizontalTiling(std::set<std::shared_ptr<Node>> setOfNodes, DimIdx_t dim, std::size_t nbSlices); // void horizontalTiling(std::set<std::shared_ptr<Node>> setOfNodes, DimIdx_t dim, std::size_t nbSlices); diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp index faf6c49bdbe28e7214f06a4d116cf23a1739154f..6dcec5aaa4fa80aefebd538a1728445051ca080e 100644 --- a/include/aidge/scheduler/Scheduler.hpp +++ b/include/aidge/scheduler/Scheduler.hpp @@ -23,7 +23,7 @@ class Node; class GraphView; class SequentialScheduler { -public: +private: struct SchedulingElement { SchedulingElement( std::shared_ptr<Node> node_, @@ -36,6 +36,7 @@ public: std::chrono::time_point<std::chrono::high_resolution_clock> end; }; +public: SequentialScheduler(std::shared_ptr<GraphView> graphView) : mGraphView(graphView) { @@ -44,6 +45,10 @@ public: ~SequentialScheduler() = default; void generateScheduling(bool verbose = false); + inline void resetScheduling() { + mScheduling.clear(); + mStaticSchedule.clear(); + } /** * @brief Run the provided Computational Graph with a batch of data @@ -58,13 +63,12 @@ public: /** * @brief Return a vector of Node ordered by the order they are called by the scheduler - * * @return std::vector<std::shared_ptr<Node>> */ - std::vector<std::shared_ptr<Node>> getStaticScheduling(){ + inline std::vector<std::shared_ptr<Node>> getStaticScheduling() const noexcept { return mStaticSchedule; } - std::shared_ptr<GraphView> getGraphView(){ + inline std::shared_ptr<GraphView> getGraphView() const noexcept { return mGraphView; } @@ -77,20 +81,11 @@ private: */ std::set<std::shared_ptr<Node>> getConsumers(const std::set<std::shared_ptr<Node>>& producers) const; - /** - * @brief Shared ptr to the scheduled graph view - * - */ + /** @brief Shared ptr to the scheduled graph view */ std::shared_ptr<GraphView> mGraphView; - /** - * @brief List of SchedulingElement (i.e: Nodes with their computation time) - * - */ + /** @brief List of SchedulingElement (i.e: Nodes with their computation time) */ std::vector<SchedulingElement> mScheduling; - /** - * @brief List of 
nodes ordered by their
-     *
-     */
+    /** @brief List of nodes ordered by their scheduling order */
     std::vector<std::shared_ptr<Node>> mStaticSchedule;
 };
 } // namespace Aidge
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 96466cd1a4b81dae3eec120360055bdf0f8c5844..ce956d115e282c43751619070dd8a10ac5c9cfae 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -188,18 +188,17 @@ void Aidge::GraphView::forwardDims() {
         // assess if the input was not already set and is a Tensor then link it to parent output
         std::pair<std::shared_ptr<Node>, IOIndex_t> inputI = nodePtr->input(i);
         if (inputI.first) {
-          if ( std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i)) != inputI.first->getOperator()->getRawOutput(inputI.second)) {
-            if ((strcmp(nodePtr->getOperator()->getRawInput(i)->type(), Tensor::Type) == 0) && (strcmp(inputI.first->getOperator()->getRawOutput(inputI.second)->type(), Tensor::Type)==0)) {
-              // assert provided Data is of "Tensor" type
-              nodePtr->getOperator()->associateInput(i, inputI.first->getOperator()->getRawOutput(inputI.second));
-            }
-            else {
-              assert(false && "Non-tensor entries not handled yet.\n");
-            }
-          }
-        } else
-        {
-          assert(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty());
+            if ( std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i)) != inputI.first->getOperator()->getRawOutput(inputI.second)) {
+                if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) {
+                    // assert provided Data is of "Tensor" type
+                    nodePtr->getOperator()->associateInput(i, inputI.first->getOperator()->getRawOutput(inputI.second));
+                }
+                else {
+                    assert(false && "Non-tensor entries not handled yet.\n");
+                }
+            }
+        } else {
+            assert(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty());
         }
       }
 
diff --git a/src/graphRegex/GraphParser.cpp b/src/graphRegex/GraphParser.cpp
index 9c3d10114d777cf7755432a5723a3b70b81d37a1..9ad96a34bfbe36bdae65cae072eb4f1edcd3faaf 100644
--- a/src/graphRegex/GraphParser.cpp
+++ b/src/graphRegex/GraphParser.cpp
@@ -1,19 +1,23 @@
-#include "aidge/graphRegex/GraphParser.hpp"
+#include <memory>
+#include <string>
+#include <vector>
 
-using namespace Aidge;
+#include "aidge/graphRegex/GraphParser.hpp"
 
-GraphParser::GraphParser(const std::string gRegexExpressions):
+Aidge::GraphParser::GraphParser(const std::string gRegexExpressions):
 mLexer(gRegexExpressions)
 {
     mCurrentToken = mLexer.getNextToken();
 }
 
+Aidge::GraphParser::~GraphParser() noexcept = default;
 
-const std::string GraphParser::getQuery(){
+
+const std::string Aidge::GraphParser::getQuery(){
     return mLexer.getQuery();
 }
 
-std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::parse(void){
+std::shared_ptr<Aidge::AstNode<Aidge::gRegexTokenTypes>> Aidge::GraphParser::parse(void){
 
     std::shared_ptr<AstNode<gRegexTokenTypes>> astTree = constructAstAllExpr();
     rstParser();
@@ -21,14 +25,14 @@ std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::parse(void){
 }
 
 
-void GraphParser::rstParser(void){
+void Aidge::GraphParser::rstParser(void){
     mLexer.rstPosition();
     mCurrentToken = mLexer.getNextToken();
 }
 
 
-void GraphParser::ackToken(gRegexTokenTypes tokenType){
-
+void Aidge::GraphParser::ackToken(gRegexTokenTypes tokenType){
+
     if(mCurrentToken->getType() == tokenType ){
         try {
             mCurrentToken = mLexer.getNextToken();
@@ -48,7 +52,7 @@ void GraphParser::ackToken(gRegexTokenTypes tokenType){
 /*
 exp : KEY(QOM | QZM)?
| CKEY | domain */ -std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstExp(void) +std::shared_ptr<Aidge::AstNode<Aidge::gRegexTokenTypes>> Aidge::GraphParser::constructAstExp(void) { try{ @@ -86,15 +90,15 @@ std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstExp(void) } /* -seq :exp (NEXT seq)* +seq :exp (NEXT seq)* */ -std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstSeq(void) +std::shared_ptr<Aidge::AstNode<Aidge::gRegexTokenTypes>> Aidge::GraphParser::constructAstSeq(void) { try{ - + std::shared_ptr<AstNode<gRegexTokenTypes>> left = constructAstExp(); - if(mCurrentToken->getType() == gRegexTokenTypes::NEXT ) + if(mCurrentToken->getType() == gRegexTokenTypes::NEXT ) { std::shared_ptr<ParsingToken<gRegexTokenTypes>> token = mCurrentToken->copy(); ackToken(gRegexTokenTypes::NEXT); @@ -114,15 +118,15 @@ std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstSeq(void) /* -LPAREN seq RPAREN (QOM | QZM) +LPAREN seq RPAREN (QOM | QZM) */ -std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstDomain(void) +std::shared_ptr<Aidge::AstNode<Aidge::gRegexTokenTypes>> Aidge::GraphParser::constructAstDomain(void) { try{ std::shared_ptr<ParsingToken<gRegexTokenTypes>> token ; std::shared_ptr<AstNode<gRegexTokenTypes>> node ; - + token = mCurrentToken->copy(); ackToken(gRegexTokenTypes::LPAREN); node = std::make_shared<AstNode<gRegexTokenTypes>>(token, @@ -144,7 +148,7 @@ std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstDomain(void) errorMessage << "Bad syntax constructAstDomain must have quantifier \n"; throw std::runtime_error(errorMessage.str()); } - + return node; } catch (const std::runtime_error& e) { @@ -157,12 +161,12 @@ std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstDomain(void) /* allExpr: seq (SEP allExpr)* | STOP */ -std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstAllExpr(void) +std::shared_ptr<Aidge::AstNode<Aidge::gRegexTokenTypes>> Aidge::GraphParser::constructAstAllExpr(void) { try{ std::shared_ptr<AstNode<gRegexTokenTypes>> left = constructAstSeq(); - if(mCurrentToken->getType() == gRegexTokenTypes::SEP ) + if(mCurrentToken->getType() == gRegexTokenTypes::SEP ) { std::shared_ptr<ParsingToken<gRegexTokenTypes>> token = mCurrentToken->copy(); ackToken(gRegexTokenTypes::SEP); @@ -170,7 +174,7 @@ std::shared_ptr<AstNode<gRegexTokenTypes>> GraphParser::constructAstAllExpr(void if(mCurrentToken->getType() == gRegexTokenTypes::STOP ) { return left; - } + } std::shared_ptr<AstNode<gRegexTokenTypes>> newNode = std::make_shared<AstNode<gRegexTokenTypes>>(token, std::vector<std::shared_ptr<AstNode<gRegexTokenTypes>>>{left,constructAstAllExpr()}); left = newNode; diff --git a/src/nodeTester/ConditionalParser.cpp b/src/nodeTester/ConditionalParser.cpp index 3ca2843aabefe9f98bc8ad46a36fe03883d0baef..ba40c561375e0c09eb86009d447a782ab99d5d0b 100644 --- a/src/nodeTester/ConditionalParser.cpp +++ b/src/nodeTester/ConditionalParser.cpp @@ -1,23 +1,27 @@ +#include <memory> +#include <vector> #include "aidge/nodeTester/ConditionalParser.hpp" -using namespace Aidge; - ////////////////////////////// //ConditionalParser ////////////////////////////// -ConditionalParser::ConditionalParser(const std::string ConditionalExpressions):mLexer(ConditionalExpressions){ +Aidge::ConditionalParser::ConditionalParser(const std::string ConditionalExpressions) + : mLexer(ConditionalExpressions) +{ mCurrentToken = mLexer.getNextToken(); } -void ConditionalParser::rstParser(void){ 
+Aidge::ConditionalParser::~ConditionalParser() noexcept = default; + +void Aidge::ConditionalParser::rstParser(void){ mLexer.rstPosition(); mCurrentToken = mLexer.getNextToken(); } -void ConditionalParser::ackToken(ConditionalTokenTypes tokenType){ +void Aidge::ConditionalParser::ackToken(ConditionalTokenTypes tokenType){ if(mCurrentToken->getType() == tokenType ){ try { @@ -38,7 +42,7 @@ void ConditionalParser::ackToken(ConditionalTokenTypes tokenType){ -std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstVal(void){ +std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::ConditionalParser::constructAstVal(void){ /* val : (KEY|INTEGER|FOAT|STRING|LAMBDA) */ @@ -76,7 +80,7 @@ std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstV } -std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstLambda(void){ +std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::ConditionalParser::constructAstLambda(void){ /* AstLambda : LAMBDA val (ARGSEP val)* RPAREN */ @@ -94,7 +98,7 @@ std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstL return std::make_shared<AstNode<ConditionalTokenTypes>>(tokenLdb,paramLambda); } -std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstCmpr(void){ +std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::ConditionalParser::constructAstCmpr(void){ /* cmpr : val (EQ|NEQ) val | LPAREN expr RPAREN NOT ir ? @@ -125,7 +129,7 @@ std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstC } } -std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstExpr(std::size_t precLimit /*= 0*/){ +std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::ConditionalParser::constructAstExpr(std::size_t precLimit /*= 0*/){ /* expr : cmpr ((AND | OR) cmpr)* the NOT is not binary OP can be use in pratt @@ -134,27 +138,27 @@ std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstE OR */ - //the not + //the not std::shared_ptr<AstNode<ConditionalTokenTypes>> left; std::shared_ptr<ParsingToken<ConditionalTokenTypes>> token = mCurrentToken->copy(); - + if (mCurrentToken->getType() == ConditionalTokenTypes::NOT ){ ackToken(ConditionalTokenTypes::NOT ); left= std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{constructAstCmpr()}); }else{ left= constructAstCmpr(); } - + //pratt - while (mCurrentToken->getType() != ConditionalTokenTypes::STOP ) //security + while (mCurrentToken->getType() != ConditionalTokenTypes::STOP ) //security { token = mCurrentToken->copy(); - //if the token is not in the map is not a operator so we consider a prec of 0 + //if the token is not in the map is not a operator so we consider a prec of 0 if (ConditionalPrec.find(token->getType()) ==ConditionalPrec.end() ){ return left; } - //if my actual operator have a prec <= of the last operator + //if my actual operator have a prec <= of the last operator std::size_t prec = ConditionalPrec.at(token->getType()); if (prec <= precLimit){ return left; @@ -165,7 +169,7 @@ std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstE std::shared_ptr<AstNode<ConditionalTokenTypes>> right = constructAstExpr(prec); - //i'm not sur what append to newNode + //i'm not sur what append to newNode //std::shared_ptr<AstNode<ConditionalTokenTypes>> newNode = std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{left,constructAstCmpr()}); 
std::shared_ptr<AstNode<ConditionalTokenTypes>> newNode = std::make_shared<AstNode<ConditionalTokenTypes>>(token,ASTNodeCh{left,right}); left = newNode; @@ -174,10 +178,10 @@ std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::constructAstE } -std::shared_ptr<AstNode<ConditionalTokenTypes>> ConditionalParser::parse(void){ +std::shared_ptr<Aidge::AstNode<Aidge::ConditionalTokenTypes>> Aidge::ConditionalParser::parse(void){ /* expr : cmpr ((AND | OR) cmpr)* - cmpr : val (EQ|NEQ) val | LPAREN expr RPAREN | BOOL | LAMBDA + cmpr : val (EQ|NEQ) val | LPAREN expr RPAREN | BOOL | LAMBDA val : (KEY|INTEGER|FOAT|STRING|LAMBDA) lambda : LAMBDA val (ARGSEP val)* RPAREN */ diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp index eb94db87df250767967348c3adfed8a1e35b4c5f..4adc57f55f7531c28c0c0603ee01c176bdd59e96 100644 --- a/src/operator/Operator.cpp +++ b/src/operator/Operator.cpp @@ -31,29 +31,6 @@ Aidge::Operator::~Operator() noexcept = default; // IMPLEMENTATION /////////////////////////////////////////////////////// -// std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>> Aidge::Operator::computeReceptiveField( -// const std::size_t firstIdx, const std::vector<Aidge::DimSize_t>& outputDims, const Aidge::IOIndex_t outputIdx) const -// { -// static_cast<void>(outputIdx); -// if (outputIdx >= nbOutputs()) { -// AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator output index out of range."); -// } -// if (nbInputs() != nbDataInputs()) { -// AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. Must be handled in an overrided function."); -// } -// if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) { -// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); -// } -// const auto outputIdxDims = getOutput(0)->getCoord(firstIdx); -// for (DimIdx_t i = 0; i < outputDims.size(); ++i) { -// if (((outputDims[i] + outputIdxDims[i]) > getOutput(0)->dims()[i]) || (outputDims[i] == 0)) { -// AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]); -// } -// } -// // return the same Tensor description as given in function parameter for each data input -// return std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>>(nbDataInputs(),std::pair<std::size_t, std::vector<Aidge::DimSize_t>>(firstIdx, outputDims)); -// } - Aidge::NbElts_t Aidge::Operator::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const { return mImpl->getNbRequiredData(inputIdx); } diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp index 1d16e9064010269174501d3c824c705c36971641..1237fdc0b5565681ab1a6af6d88f74a48cbd5b57 100644 --- a/src/operator/OperatorTensor.cpp +++ b/src/operator/OperatorTensor.cpp @@ -88,6 +88,31 @@ const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getOutput(const Aid } +std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>> Aidge::OperatorTensor::computeReceptiveField( + const std::size_t firstIdx, + const std::vector<Aidge::DimSize_t>& outputDims, + const Aidge::IOIndex_t outputIdx) const +{ + static_cast<void>(outputIdx); + if (outputIdx >= nbOutputs()) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator output index out of range."); + } + if (nbInputs() != nbData()) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Operator has attributes. 
Must be handled in an overrided function."); + } + if (!outputDimsForwarded() || getOutput(0)->nbDims() != outputDims.size()) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range or output dim not forwarded yet."); + } + const auto outputIdxDims = getOutput(0)->getCoord(firstIdx); + for (DimIdx_t i = 0; i < outputDims.size(); ++i) { + if (((outputDims[i] + outputIdxDims[i]) > getOutput(0)->dims()[i]) || (outputDims[i] == 0)) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Given outputDim out of range for dimension %lu (%lu + %lu)", static_cast<std::size_t>(i), outputIdxDims[i], outputDims[i]); + } + } + // return the same Tensor description as given in function parameter for each data input + return std::vector<std::pair<std::size_t, std::vector<Aidge::DimSize_t>>>(nbData(),std::pair<std::size_t, std::vector<Aidge::DimSize_t>>(firstIdx, outputDims)); +} + void Aidge::OperatorTensor::computeOutputDims() { // check inputs have been associated bool associated = (nbInputs() > 0); // do not compute anything if no input diff --git a/src/recipies/HorizontalTiling.cpp b/src/recipies/HorizontalTiling.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d8eb015939e7be19eb866b75e5a5601ba80631d0 --- /dev/null +++ b/src/recipies/HorizontalTiling.cpp @@ -0,0 +1,93 @@ +/******************************************************************************** + * Copyright (c) 2023 CEA-List + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0. + * + * SPDX-License-Identifier: EPL-2.0 + * + ********************************************************************************/ + +#include <set> +#include <memory> +#include <vector> +#include <utility> + +#include "aidge/recipies/Recipies.hpp" + +#include "aidge/graph/Node.hpp" +#include "aidge/graph/GraphView.hpp" +#include "aidge/utils/ErrorHandling.hpp" +#include "aidge/operator/OperatorTensor.hpp" +#include "aidge/data/Data.hpp" +#include "aidge/utils/Types.h" + +#include "aidge/operator/Add.hpp" +#include "aidge/operator/Concat.hpp" +#include "aidge/operator/Slice.hpp" + +// TODO: assert Operator uses Tensors when implemented +std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std::shared_ptr<Aidge::Node>& node, + const Aidge::DimIdx_t axis, + const std::size_t nbSlices) +{ + if (node->getOperator()->type() != "Conv") { + AIDGE_INTERNAL_ASSERT("Operator should be a Convolution."); + } + const auto& op = std::dynamic_pointer_cast<OperatorTensor>(node->getOperator()); + if (op->nbOutputs() != 1 || op->nbData() > 1) { + AIDGE_INTERNAL_ASSERT("Only slice Operators with one output and at most one input for now."); + } + if (!op->outputDimsForwarded()) { + AIDGE_INTERNAL_ASSERT("Dimensions must be forwarded before any tiling"); + } + // start by doing a tiling with strict dimensions division + const auto& outTensor = op->getOutput(0); + if (op->getOutput(0)->dims()[axis] % nbSlices != 0) { + AIDGE_INTERNAL_ASSERT("axis should be a multiple of nbSlices"); + } + + // dimensions of a Slice + std::vector<DimSize_t> outputDims = outTensor->dims(); + outputDims[axis] /= nbSlices; + + std::vector<DimSize_t> currentFirstDims = std::vector<DimSize_t>(outTensor->nbDims(), 0); + + std::set<std::shared_ptr<Aidge::Node>> res; + auto concat = Concat(nbSlices, axis); + res.insert(concat); + + // check slice sizes + // const auto inputDims = op->computeReceptiveField(currentFirstDims[axis], 
outputDims, 0); + // std::vector<bool> shareTensor(node->nbInputs(), false); + // for (DimSize_t inputID = 0; inputID < node->nbInputs(); ++inputID) { + // const auto inTensor = std::dynamic_pointer_cast<Tensor>(node->getOperator()->getRawInput(inputID)); + // if (inTensor->dims() == inputDims[inputID].second) + // shareTensor[inputID] = true; + // } + + std::vector<std::shared_ptr<Node>> clonedInputs = std::vector<std::shared_ptr<Node>>(node->nbInputs(), nullptr); + for (std::size_t i = node->nbData(); i < node ->nbInputs(); ++i) { + clonedInputs[i] = node -> getParent(i) -> cloneSharedOperators(); + clonedInputs[i] -> setName(node -> name() + "_0"); + res.insert(clonedInputs[i]); + } + + for (; currentFirstDims[axis] < outTensor->dims()[axis]; currentFirstDims[axis] += outputDims[axis]) { + const auto inputDims = op->computeReceptiveField(outTensor->getIdx(currentFirstDims), outputDims, 0); + auto newNode = node -> clone(); // no input associated to clones + newNode -> setName(node->name() + "_" + std::to_string(currentFirstDims[axis])); + clonedInputs[1] -> addChild(newNode, 0, 1); + clonedInputs[2] -> addChild(newNode, 0, 2); + // Slice for input and each parameter + auto slice = Slice(inputDims[0].first, inputDims[0].second, "Slice_" + std::to_string(currentFirstDims[axis])); + slice -> addChild(newNode, 0, 0); + newNode -> addChild(concat, 0, currentFirstDims[axis]); + + res.insert(slice); + res.insert(newNode); + } + + return res; +} \ No newline at end of file diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp index 1f34091e54c0f83dae6b60589c20fb8fdf1d5064..3afbcd0442fd40214687751d50bfc98809bba840 100644 --- a/src/scheduler/Scheduler.cpp +++ b/src/scheduler/Scheduler.cpp @@ -19,6 +19,7 @@ #include "aidge/graph/GraphView.hpp" #include "aidge/graph/Node.hpp" #include "aidge/utils/Types.h" +#include "aidge/operator/OperatorTensor.hpp" void drawProgressBar(double progress, int barWidth, const std::string& additionalInfo = "") { putchar('['); diff --git a/unit_tests/operator/Test_ConvDepthWise_Op.cpp b/unit_tests/operator/Test_ConvDepthWise_Op.cpp index ef68c439d3a3cdf95b7122c1b41bc9fc97311f2d..14d4dc537f527b32414151ee7f93e601f5a4bd8a 100644 --- a/unit_tests/operator/Test_ConvDepthWise_Op.cpp +++ b/unit_tests/operator/Test_ConvDepthWise_Op.cpp @@ -22,47 +22,52 @@ #include "aidge/utils/Types.h" namespace Aidge { -// TEST_CASE("[core/operator] ConvDepthWise_Op(computeReceptiveField)", "[Operator][computeReceptiveFiled][ConvDepthWise]") { -// auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider"); -// auto conv1 = ConvDepthWise({5, 5}, "conv1"); // output dims: {16, 3, 220, 220} -// auto conv2 = ConvDepthWise({3, 3}, "conv2"); // output dims: {16, 3, 218, 218} -// auto conv3 = ConvDepthWise({2, 2}, "conv3", {2,2}); // output dims: {16, 3, 109, 109} -// auto conv4 = ConvDepthWise({1, 1}, "conv4"); // output dims: {16, 3, 109, 109} +TEST_CASE("[core/operator] ConvDepthWise_Op(computeReceptiveField)", "[Operator][computeReceptiveFiled][ConvDepthWise]") { + auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider"); + auto cdw1 = ConvDepthWise(3, {5, 5}, "cdw1"); // output dims: {16, 3, 220, 220} + auto cdw2 = ConvDepthWise(3, {3, 3}, "cdw2"); // output dims: {16, 3, 218, 218} + auto cdw3 = ConvDepthWise(3, {2, 2}, "cdw3", {2,2}); // output dims: {16, 3, 109, 109} + auto cdw4 = ConvDepthWise(3, {1, 1}, "cdw4"); // output dims: {16, 3, 109, 109} -// auto g = std::make_shared<GraphView>("TestGraph"); + auto g = std::make_shared<GraphView>("TestGraph"); -// 
dataProvider->addChild(conv1, 0); -// g->add(conv1); -// g->addChild(conv2, conv1, 0); -// g->addChild(conv3, conv2, 0); -// g->addChild(conv4, conv3, 0); + dataProvider->addChild(cdw1, 0); + g->add(cdw1); + g->addChild(cdw2, cdw1, 0); + g->addChild(cdw3, cdw2, 0); + g->addChild(cdw4, cdw3, 0); -// g->forwardDims(); + g->forwardDims(); -// SECTION("Check individual receptive fields") { -// auto res1 = conv1->getOperator()->computeReceptiveField(0, {16,3,10,10}); -// auto res2 = conv2->getOperator()->computeReceptiveField(conv2->getOperator()->output(0).getIdx({3,1,100,28}), {4,2,30,40}); -// auto res3 = conv3->getOperator()->computeReceptiveField(0, {1,1,109,109}); -// auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->input(0).getIdx({5,0,108,108}), {10,1,1,1}); + auto op1 = std::dynamic_pointer_cast<OperatorTensor>(cdw1 -> getOperator()); + auto op2 = std::dynamic_pointer_cast<OperatorTensor>(cdw2 -> getOperator()); + auto op3 = std::dynamic_pointer_cast<OperatorTensor>(cdw3 -> getOperator()); + auto op4 = std::dynamic_pointer_cast<OperatorTensor>(cdw4 -> getOperator()); -// REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14})))); -// REQUIRE(((res2[0].first == conv2->getOperator()->input(0).getIdx({3,1,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 2, 32, 42})))); -// REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 1, 218, 218})))); -// REQUIRE(((res4[0].first == conv4->getOperator()->input(0).getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 1, 1, 1})))); -// } + SECTION("Check individual receptive fields") { + auto res1 = op1->computeReceptiveField(0, {16,3,10,10}); + auto res2 = op2->computeReceptiveField(op2->getOutput(0)->getIdx({3,1,100,28}), {4,2,30,40}); + auto res3 = op3->computeReceptiveField(0, {1,1,109,109}); + auto res4 = op4->computeReceptiveField(op4->getInput(0)->getIdx({5,0,108,108}), {10,1,1,1}); -// SECTION("Check receptive field propagation") { -// // input: first-{5, 0, 50, 50} dims-{1, 1, 1, 1} -// auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->input(0).getIdx({5,0,50,50}), {1,1,1,1}); -// // conv4 RF: first-{5, 0, 50, 50} dims-{1, 1, 1, 1} -// auto res3 = conv3->getOperator()->computeReceptiveField(res4[0].first, res4[0].second); -// // conv3 RF: first-{5, 0, 100, 100} dims-{1, 1, 2, 2} -// auto res2 = conv2->getOperator()->computeReceptiveField(res3[0].first, res3[0].second); -// // conv2 RF: first-{5, 0, 100, 100} dims-{1, 1, 4, 4} -// auto res1 = conv1->getOperator()->computeReceptiveField(res2[0].first, res2[0].second); -// // conv1 RF: first-{5, 0, 100, 100} dims-{1, 1, 8, 8} + REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14})))); + REQUIRE(((res2[0].first == op2->getInput(0)->getIdx({3,1,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 2, 32, 42})))); + REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 1, 218, 218})))); + REQUIRE(((res4[0].first == op4->getInput(0)->getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 1, 1, 1})))); + } -// REQUIRE(((res1[0].first == conv1->getOperator()->input(0).getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 1, 8, 8})))); -// } -// } + SECTION("Check receptive field propagation") { + // input: first-{5, 0, 50, 50} dims-{1, 1, 1, 1} + auto res4 = op4->computeReceptiveField(op4->getInput(0)->getIdx({5,0,50,50}), {1,1,1,1}); + // cdw4 
RF: first-{5, 0, 50, 50} dims-{1, 1, 1, 1} + auto res3 = op3->computeReceptiveField(res4[0].first, res4[0].second); + // cdw3 RF: first-{5, 0, 100, 100} dims-{1, 1, 2, 2} + auto res2 = op2->computeReceptiveField(res3[0].first, res3[0].second); + // cdw2 RF: first-{5, 0, 100, 100} dims-{1, 1, 4, 4} + auto res1 = op1->computeReceptiveField(res2[0].first, res2[0].second); + // cdw1 RF: first-{5, 0, 100, 100} dims-{1, 1, 8, 8} + + REQUIRE(((res1[0].first == op1->getInput(0)->getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 1, 8, 8})))); + } +} } // namespace Aidge \ No newline at end of file diff --git a/unit_tests/operator/Test_Conv_Op.cpp b/unit_tests/operator/Test_Conv_Op.cpp index ac667ec5af69dccc3e421530a17aca88018aab09..a3e84999eb2e2a31f1217330ac9718f35b0ca396 100644 --- a/unit_tests/operator/Test_Conv_Op.cpp +++ b/unit_tests/operator/Test_Conv_Op.cpp @@ -22,58 +22,65 @@ #include "aidge/utils/Types.h" namespace Aidge { -// TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeReceptiveField][Conv]") { -// auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider"); -// auto conv1 = Conv(3, 32, {5, 5}, "conv1"); // output dims: {16, 32, 220, 220} -// auto conv2 = Conv(32, 64, {3, 3}, "conv2"); // output dims: {16, 64, 218, 218} -// auto conv3 = Conv(64, 10, {2, 2}, "conv3", {2,2}); // output dims: {16, 10, 109, 109} -// auto conv4 = Conv(10, 10, {1, 1}, "conv4"); // output dims: {16, 10, 109, 109} +TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeReceptiveField][Conv]") { + auto dataProvider = Producer({16, 3, 224, 224}, "dataProvider"); + auto conv1 = Conv(3, 32, {5, 5}, "conv1"); // output dims: {16, 32, 220, 220} + auto conv2 = Conv(32, 64, {3, 3}, "conv2"); // output dims: {16, 64, 218, 218} + auto conv3 = Conv(64, 10, {2, 2}, "conv3", {2,2}); // output dims: {16, 10, 109, 109} + auto conv4 = Conv(10, 10, {1, 1}, "conv4"); // output dims: {16, 10, 109, 109} -// auto g = std::make_shared<GraphView>("TestGraph"); + auto g = std::make_shared<GraphView>("TestGraph"); -// dataProvider->addChild(conv1, 0); -// g->add(conv1); -// g->addChild(conv2, conv1, 0); -// g->addChild(conv3, conv2, 0); -// g->addChild(conv4, conv3, 0); + dataProvider->addChild(conv1, 0); + g->add(conv1); + g->addChild(conv2, conv1, 0); + g->addChild(conv3, conv2, 0); + g->addChild(conv4, conv3, 0); -// g->forwardDims(); + g->forwardDims(); -// SECTION("Check individual receptive fields") { -// auto res1 = conv1->getOperator()->computeReceptiveField(0, {16,32,10,10}); -// auto res2 = conv2->getOperator()->computeReceptiveField(conv2->getOperator()->output(0).getIdx({3,20,100,28}), {4,20,30,40}); -// auto res3 = conv3->getOperator()->computeReceptiveField(0, {1,1,109,109}); -// auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->output(0).getIdx({5,0,108,108}), {10,10,1,1}); + auto op1 = std::dynamic_pointer_cast<OperatorTensor>(conv1 -> getOperator()); + auto op2 = std::dynamic_pointer_cast<OperatorTensor>(conv2 -> getOperator()); + auto op3 = std::dynamic_pointer_cast<OperatorTensor>(conv3 -> getOperator()); + auto op4 = std::dynamic_pointer_cast<OperatorTensor>(conv4 -> getOperator()); -// REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14})))); -// REQUIRE(((res2[0].first == conv2->getOperator()->input(0).getIdx({3,0,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 32, 32, 42})))); -// REQUIRE(((res3[0].first == 0) && (res3[0].second == 
std::vector<DimSize_t>({1, 64, 218, 218})))); -// REQUIRE(((res4[0].first == conv4->getOperator()->input(0).getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 10, 1, 1})))); -// } + SECTION("Check individual receptive fields") { + auto res1 = op1 -> computeReceptiveField(0, {16,32,10,10}); + auto res2 = op2 -> computeReceptiveField(op2 -> getOutput(0)->getIdx({3,20,100,28}), {4,20,30,40}); + auto res3 = op3 -> computeReceptiveField(0, {1,1,109,109}); + auto res4 = op4 -> computeReceptiveField(op4 -> getOutput(0)->getIdx({5,0,108,108}), {10,10,1,1}); -// SECTION("Check receptive field propagation") { -// // input: first-{5, 0, 50, 50} dims-{1, 1, 1, 1} -// auto res4 = conv4->getOperator()->computeReceptiveField(conv4->getOperator()->output(0).getIdx({5,0,50,50}), {1,1,1,1}); -// // conv4 RF: first-{5, 0, 50, 50} dims-{1, 10, 1, 1} -// auto res3 = conv3->getOperator()->computeReceptiveField(res4[0].first, res4[0].second); -// // conv3 RF: first-{5, 0, 100, 100} dims-{1, 64, 2, 2} -// auto res2 = conv2->getOperator()->computeReceptiveField(res3[0].first, res3[0].second); -// // conv2 RF: first-{5, 0, 100, 100} dims-{1, 32, 4, 4} -// auto res1 = conv1->getOperator()->computeReceptiveField(res2[0].first, res2[0].second); -// // conv1 RF: first-{5, 0, 100, 100} dims-{1, 3, 8, 8} + REQUIRE(((res1[0].first == 0) && (res1[0].second == std::vector<DimSize_t>({16, 3, 14, 14})))); + REQUIRE(((res1[1].first == 0) && (res1[1].second == std::vector<DimSize_t>({32, 3, 5, 5})))); + REQUIRE(((res1[2].first == 0) && (res1[2].second == std::vector<DimSize_t>({32})))); + REQUIRE(((res2[0].first == op2->getInput(0)->getIdx({3,0,100,28})) && (res2[0].second == std::vector<DimSize_t>({4, 32, 32, 42})))); + REQUIRE(((res3[0].first == 0) && (res3[0].second == std::vector<DimSize_t>({1, 64, 218, 218})))); + REQUIRE(((res4[0].first == op4->getInput(0)->getIdx({5, 0, 108, 108})) && (res4[0].second == std::vector<DimSize_t>({10, 10, 1, 1})))); + } -// REQUIRE(((res1[0].first == conv1->getOperator()->input(0).getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 3, 8, 8})))); + SECTION("Check receptive field propagation") { + // input: first-{5, 0, 50, 50} dims-{1, 1, 1, 1} + auto res4 = op4->computeReceptiveField(op4->getOutput(0)->getIdx({5,0,50,50}), {1,1,1,1}); + // conv4 RF: first-{5, 0, 50, 50} dims-{1, 10, 1, 1} + auto res3 = op3->computeReceptiveField(res4[0].first, res4[0].second); + // conv3 RF: first-{5, 0, 100, 100} dims-{1, 64, 2, 2} + auto res2 = op2->computeReceptiveField(res3[0].first, res3[0].second); + // conv2 RF: first-{5, 0, 100, 100} dims-{1, 32, 4, 4} + auto res1 = op1->computeReceptiveField(res2[0].first, res2[0].second); + // conv1 RF: first-{5, 0, 100, 100} dims-{1, 3, 8, 8} + REQUIRE(((res1[0].first == op1->getInput(0)->getIdx({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 3, 8, 8})))); -// // std::cout << "conv1: {"; -// // std::cout << conv1->getOperator()->input(0).getCoord(res1[0].first)[0] << ", " -// // << conv1->getOperator()->input(0).getCoord(res1[0].first)[1] << ", " -// // << conv1->getOperator()->input(0).getCoord(res1[0].first)[2] << ", " -// // << conv1->getOperator()->input(0).getCoord(res1[0].first)[3] << "} - {"; -// // std::cout << res1[0].second[0] << ", " -// // << res1[0].second[1] << ", " -// // << res1[0].second[2] << ", " -// // << res1[0].second[3] << "}" << std::endl; -// } -// } + + // std::cout << "conv1: {"; + // std::cout << op1->input(0).getCoord(res1[0].first)[0] << ", " + // << 
op1->input(0).getCoord(res1[0].first)[1] << ", " + // << op1->input(0).getCoord(res1[0].first)[2] << ", " + // << op1->input(0).getCoord(res1[0].first)[3] << "} - {"; + // std::cout << res1[0].second[0] << ", " + // << res1[0].second[1] << ", " + // << res1[0].second[2] << ", " + // << res1[0].second[3] << "}" << std::endl; + } +} } // namespace Aidge \ No newline at end of file diff --git a/unit_tests/recipies/Test_HorizontalTiling.cpp b/unit_tests/recipies/Test_HorizontalTiling.cpp deleted file mode 100644 index c9fb5ed6dc8a5d994ce2d3434a8176c29e418f95..0000000000000000000000000000000000000000 --- a/unit_tests/recipies/Test_HorizontalTiling.cpp +++ /dev/null @@ -1,200 +0,0 @@ -// /******************************************************************************** -// * Copyright (c) 2023 CEA-List -// * -// * This program and the accompanying materials are made available under the -// * terms of the Eclipse Public License 2.0 which is available at -// * http://www.eclipse.org/legal/epl-2.0. -// * -// * SPDX-License-Identifier: EPL-2.0 -// * -// ********************************************************************************/ - -// #include <catch2/catch_test_macros.hpp> -// #include <set> - -// #include "aidge/graph/GraphView.hpp" -// #include "aidge/graph/OpArgs.hpp" -// #include "aidge/operator/Conv.hpp" -// #include "aidge/operator/ReLU.hpp" -// #include "aidge/recipies/Recipies.hpp" - - -// namespace Aidge { - -// TEST_CASE("[core/recipies] Tiling(transformation)", "[Tiling][Recipies]") { - -// SECTION("Transform a pre-generated GraphView") { - -// SECTION("Simple Node: Conv") { -// std::shared_ptr<Node> myConv = Conv(3,4,{3,3}, "myconv"); -// myConv->getOperator()->setDatatype(DataType::Int32); -// myConv->getOperator()->setBackend("cpu"); -// std::shared_ptr<Tensor> myWeights = std::make_shared<Tensor>(Array4D<int,4,3,3,3> { -// { -// { -// {{ 0, 1, 2}, -// { 3, 4, 5}, -// { 6, 7, 8}}, -// {{ 9, 10, 11}, -// { 12, 13, 14}, -// { 15, 16, 17}}, -// {{ 18, 19, 20}, -// { 21, 22, 23}, -// { 24, 25, 26}} -// }, -// { -// {{ 27, 28, 29}, -// { 30, 31, 32}, -// { 33, 34, 35}}, -// {{ 36, 37, 38}, -// { 39, 40, 41}, -// { 42, 43, 44}}, -// {{ 45, 46, 47}, -// { 48, 49, 50}, -// { 51, 52, 53}} -// }, -// { -// {{ 54, 55, 56}, -// { 57, 58, 59}, -// { 60, 61, 62}}, -// {{ 63, 64, 65}, -// { 66, 67, 68}, -// { 69, 70, 71}}, -// {{ 72, 73, 74}, -// { 75, 76, 77}, -// { 78, 79, 80}} -// }, -// { -// {{ 81, 82, 83}, -// { 84, 85, 86}, -// { 87, 88, 89}}, -// {{ 90, 91, 92}, -// { 93, 94, 95}, -// { 96, 97, 98}}, -// {{ 99, 100, 101}, -// {102, 103, 104}, -// {105, 106, 107}} -// } -// } -// }); -// std::shared_ptr<Tensor> myBias = std::make_shared<Tensor>(Array1D<int,4> {{7,0,9,0}}); -// std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(Array4D<int,2,3,5,5> { //NCHW -// { -// { -// {{ 0, 1, 2, 3, 4}, -// { 5, 6, 7, 8, 9}, -// { 10, 11, 12, 13, 14}, -// { 15, 16, 17, 18, 19}, -// { 20, 21, 22, 23, 24}}, - -// {{ 25, 26, 27, 28, 29}, -// { 30, 31, 32, 33, 34}, -// { 35, 36, 37, 38, 39}, -// { 40, 41, 42, 43, 44}, -// { 45, 46, 47, 48, 49}}, - -// {{ 50, 51, 52, 53, 54}, -// { 55, 56, 57, 58, 59}, -// { 60, 61, 62, 63, 64}, -// { 65, 66, 67, 68, 69}, -// { 70, 71, 72, 73, 74}} -// }, -// { -// {{ 75, 76, 77, 78, 79}, -// { 80, 81, 82, 83, 84}, -// { 85, 86, 87, 88, 89}, -// { 90, 91, 92, 93, 94}, -// { 95, 96, 97, 98, 99}}, - -// {{100, 101, 102, 103, 104}, -// {105, 106, 107, 108, 109}, -// {110, 111, 112, 113, 114}, -// {115, 116, 117, 118, 119}, -// {120, 121, 122, 123, 124}}, - -// {{125, 
126, 127, 128, 129}, -// {130, 131, 132, 133, 134}, -// {135, 136, 137, 138, 139}, -// {140, 141, 142, 143, 144}, -// {145, 146, 147, 148, 149}} -// } -// } -// }); -// std::shared_ptr<Tensor> myOutput = std::make_shared<Tensor>(Array4D<int,2,4,3,3> { -// { -// { -// {{ 15226, 15577, 15928}, -// { 16981, 17332, 17683}, -// { 18736, 19087, 19438}}, - -// {{ 37818, 38898, 39978}, -// { 43218, 44298, 45378}, -// { 48618, 49698, 50778}}, - -// {{ 60426, 62235, 64044}, -// { 69471, 71280, 73089}, -// { 78516, 80325, 82134}}, - -// {{ 83016, 85554, 88092}, -// { 95706, 98244, 100782}, -// {108396, 110934, 113472}} -// }, -// { -// {{ 41551, 41902, 42253}, -// { 43306, 43657, 44008}, -// { 45061, 45412, 45763}}, - -// {{118818, 119898, 120978}, -// {124218, 125298, 126378}, -// {129618, 130698, 131778}}, - -// {{196101, 197910, 199719}, -// {205146, 206955, 208764}, -// {214191, 216000, 217809}}, - -// {{273366, 275904, 278442}, -// {286056, 288594, 291132}, -// {298746, 301284, 303822}} -// } -// } -// }); -// myConv->getOperator()->associateInput(0,myInput); -// myConv->getOperator()->associateInput(1,myWeights); -// myConv->getOperator()->associateInput(2,myBias); -// myConv->getOperator()->computeOutputDims(); - -// std::shared_ptr<GraphView> g; -// g->add(myConv); -// horizontalTiling({myConv}, 3); - -// SequentialScheduler s(g); -// s->forward(); - -// // myConv->getOperator()->getOutput(0)->print(); -// REQUIRE(*(myConv->getOperator()->getOutput(0)) == *myOutput); -// } -// } -// } -// } -// // std::shared_ptr<GraphView> g = Sequential({ -// // Conv(3, 16, {3,3}, "conv1"), -// // ReLU("relu1"), -// // Conv(16, 32, {1,1}, "conv2"), -// // Conv(32, 16, {1,1}, "conv3"), -// // Conv(16, 10, {3,3}, "conv4"), -// // ReLU("relu2") -// // }); - -// // for (auto& individualConv : g->match("Conv")) { -// // auto tiledConv = horizontalTiling(individualConv); -// // g->replace(individualConv, tiledConv); -// // } -// // } - -// // SECTION("Create the GraphView with tiled layers") { -// // std::shared_ptr<GraphView> g; -// // g->addChild(horizontalTiling(Conv())) -// // } - -// // } -// // } // namespace Aidge \ No newline at end of file
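For reference, a minimal usage sketch of the new getConvHorizontalTiling recipe follows; it is not part of the patch above. It mirrors the graph setup used in the receptive-field unit tests; the Producer include path and the final re-wiring of the returned nodes into the graph are assumptions, since this version of the recipe only returns the tiled node set.

#include <memory>
#include <set>

#include "aidge/graph/GraphView.hpp"
#include "aidge/graph/Node.hpp"
#include "aidge/operator/Conv.hpp"
#include "aidge/operator/Producer.hpp"  // assumed header for the Producer factory
#include "aidge/recipies/Recipies.hpp"

void tileConvSketch() {
    // Same topology as the receptive-field tests: one Producer feeding one Conv.
    auto dataProvider = Aidge::Producer({16, 3, 224, 224}, "dataProvider");
    auto conv = Aidge::Conv(3, 32, {5, 5}, "conv");   // output dims: {16, 32, 220, 220}

    auto g = std::make_shared<Aidge::GraphView>("TestGraph");
    dataProvider->addChild(conv, 0);
    g->add(conv);
    g->forwardDims();  // the recipe asserts that output dims have been forwarded

    // Slice the Conv output into 4 parts along the height axis (N,C,H,W -> axis 2);
    // 220 % 4 == 0, so the strict-division check passes.
    const std::set<std::shared_ptr<Aidge::Node>> tiled =
        Aidge::getConvHorizontalTiling(conv, /*axis=*/2, /*nbSlices=*/4);

    // `tiled` holds the Slice, cloned Conv and Concat nodes; inserting them back
    // into `g` in place of `conv` is left to the caller in this version.
}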