From 5de72b4e2f9a8a18c87aca625136e4326f5c266c Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Thu, 28 Sep 2023 14:23:24 +0000
Subject: [PATCH] [Upd] Each Operator copy constructor creates a new
 implementation with the same backend

---
 include/aidge/operator/Add.hpp             | 3 ++-
 include/aidge/operator/AvgPooling.hpp      | 3 ++-
 include/aidge/operator/BatchNorm.hpp       | 1 +
 include/aidge/operator/Conv.hpp            | 1 +
 include/aidge/operator/ConvDepthWise.hpp   | 1 +
 include/aidge/operator/FC.hpp              | 1 +
 include/aidge/operator/GenericOperator.hpp | 2 +-
 include/aidge/operator/LeakyReLU.hpp       | 1 +
 include/aidge/operator/Matmul.hpp          | 1 +
 include/aidge/operator/MaxPooling.hpp      | 3 ++-
 include/aidge/operator/Operator.hpp        | 1 +
 include/aidge/operator/Producer.hpp        | 1 +
 include/aidge/operator/ReLU.hpp            | 1 +
 include/aidge/operator/Scaling.hpp         | 1 +
 include/aidge/operator/Softmax.hpp         | 1 +
 15 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 8991a6f5f..303092911 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -51,7 +51,7 @@ public:
      * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Add_Op(const Add_Op& op)
+    Add_Op(const Add_Op<NUM>& op)
         : Operator(Type),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
@@ -61,6 +61,7 @@ public:
             mInputs[i] = std::make_shared<Tensor>();
         }
         setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Add_Op<NUM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 4f6c2e6ce..2fbff53c3 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -66,13 +66,14 @@ public:
      * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    AvgPooling_Op(const AvgPooling_Op& op)
+    AvgPooling_Op(const AvgPooling_Op<DIM>& op)
         : Operator(Type),
           Parameterizable_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index cb22eda33..f1a6ae8f5 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -66,6 +66,7 @@ public:
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 3015069ea..e95b46ae5 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -76,6 +76,7 @@ public:
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 3d0e5c931..12d15328c 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -81,6 +81,7 @@ class ConvDepthWise_Op : public Operator,
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index c17344694..73cdab54c 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -67,6 +67,7 @@ public:
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<FC_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 68164dda2..184100174 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -159,7 +159,7 @@ class GenericOperator_Op
         if (mComputeOutputDims) {
             return !(mOutputs[0]->empty());
         }
-        else {
+        else {
            assert(false && "GenericOperator cannot forward dims");
            return false;
        }
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 2553b46d8..dc9548515 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -64,6 +64,7 @@ public:
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Matmul.hpp b/include/aidge/operator/Matmul.hpp
index fc0032951..54bbcb267 100644
--- a/include/aidge/operator/Matmul.hpp
+++ b/include/aidge/operator/Matmul.hpp
@@ -65,6 +65,7 @@ public:
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Matmul_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 5eff15f72..775583fd4 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -67,13 +67,14 @@ public:
      * @brief Copy-constructor. Copy the operator parameters and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    MaxPooling_Op(const MaxPooling_Op& op)
+    MaxPooling_Op(const MaxPooling_Op<DIM>& op)
         : Operator(Type),
           Parameterizable_(op),
           mOutput(std::make_shared<Tensor>(*op.mOutput))
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(mOutput->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index f99b1e26d..3ac651cfd 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -42,6 +42,7 @@ public:
         std::enable_shared_from_this<Operator>()
     {
         mType = op.mType;
+        mImpl = nullptr;
        // Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation.
        // See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion.
        // Hooks are not copied.
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 937105fbb..fbab24a0d 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -60,6 +60,7 @@ public:
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Producer_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 2f9751a82..cebfa5718 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -52,6 +52,7 @@ public:
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index a2b409184..e3cba81a4 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -66,6 +66,7 @@ public:
     {
         // cpy-ctor
         setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Scaling_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 193fd811b..ffaf0001f 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -52,6 +52,7 @@ public:
     {
         // cpy-ctor
        setDatatype(op.mOutput->dataType());
+        mImpl = op.mImpl ? Registrar<Softmax_Op>::create(mOutput->getImpl()->backend())(*this) : nullptr;
     }
 
     /**
--
GitLab
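
Editor's note: every hunk above applies the same pattern. The copy-constructor never shares the implementation pointer; if the copied operator had an implementation, a fresh one is created for the copy on the same backend, otherwise mImpl stays null. Below is a minimal, self-contained C++ sketch of that idea. The Impl, registry() and MyOp names are simplified placeholders for illustration only, not the actual Aidge Registrar/OperatorImpl API, and unlike this sketch the real patch reads the backend name from the copied operator's output tensor implementation (mOutput->getImpl()->backend()).

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct MyOp;  // forward declaration so the factory signature can refer to the operator

// Stand-in for a backend-specific implementation (illustration only).
struct Impl {
    std::string backend;
    explicit Impl(const std::string& b) : backend(b) {}
};

// Stand-in for Registrar<Op>::create(backend): maps a backend name to an implementation factory.
std::map<std::string, std::function<std::unique_ptr<Impl>(const MyOp&)>>& registry() {
    static std::map<std::string, std::function<std::unique_ptr<Impl>(const MyOp&)>> r;
    return r;
}

struct MyOp {
    std::unique_ptr<Impl> mImpl;

    MyOp() = default;

    // Copy-constructor: never share the implementation. If the copied operator had one,
    // build a fresh implementation on the same backend; otherwise leave it null.
    MyOp(const MyOp& op)
        : mImpl(op.mImpl ? registry().at(op.mImpl->backend)(*this) : nullptr) {}

    void setBackend(const std::string& name) { mImpl = registry().at(name)(*this); }
};

int main() {
    registry()["cpu"] = [](const MyOp&) { return std::make_unique<Impl>("cpu"); };

    MyOp a;
    a.setBackend("cpu");

    MyOp b(a);  // b gets its own "cpu" implementation, not a's pointer
    std::cout << (b.mImpl ? b.mImpl->backend : "none") << '\n';  // prints: cpu
}

The null check mirrors the `op.mImpl ? ... : nullptr` line each header adds after setDatatype(...), so a copy ends up with an independent implementation bound to the same backend instead of aliasing the original's, while copies of operators that were never given a backend remain implementation-free.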