From e7ce72cdfc4ae7538e52fe256b279b5ee70af4b5 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Fri, 1 Mar 2024 09:11:07 +0000
Subject: [PATCH] Update clone functions with SET_IMPL_MACRO.
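
Replace the direct Registrar<Op>::create(...) calls in the operator
copy constructors (used by clone()) with the SET_IMPL_MACRO helper.
While doing so, the Erf, Gather, ReduceMean, Reshape and Transpose
copy constructors now read the backend from op.mOutputs[0] (the source
operator) rather than the copy's own, not-yet-initialized mOutputs[0];
Move_Op now keys its implementation on the output backend only; the
redundant direct Registrar call in LeakyReLU_Op::setBackend() is
removed; and missing end-of-file newlines are added in Move.hpp and
Scaling.hpp.

For context, a minimal sketch of the guarded pattern the macro is
assumed to stand for (illustrative only, not the actual Aidge
definition of SET_IMPL_MACRO):

    // Hypothetical expansion, assuming a Registrar<T_Op>::exists()
    // check: create the implementation only when one is registered
    // for the requested backend, instead of failing outright.
    #define SET_IMPL_MACRO(T_Op, op, backend_name)                   \
        if (Registrar<T_Op>::exists(backend_name)) {                  \
            (op).mImpl = Registrar<T_Op>::create(backend_name)(op);   \
        }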

---
 include/aidge/operator/Add.hpp           | 6 +++++-
 include/aidge/operator/AvgPooling.hpp    | 6 +++++-
 include/aidge/operator/BatchNorm.hpp     | 6 +++++-
 include/aidge/operator/Concat.hpp        | 6 +++++-
 include/aidge/operator/Conv.hpp          | 6 +++++-
 include/aidge/operator/ConvDepthWise.hpp | 6 +++++-
 include/aidge/operator/Div.hpp           | 6 +++++-
 include/aidge/operator/Erf.hpp           | 6 +++++-
 include/aidge/operator/FC.hpp            | 6 +++++-
 include/aidge/operator/Gather.hpp        | 6 +++++-
 include/aidge/operator/LeakyReLU.hpp     | 7 +++++--
 include/aidge/operator/MaxPooling.hpp    | 6 +++++-
 include/aidge/operator/Move.hpp          | 8 ++++++--
 include/aidge/operator/Pow.hpp           | 6 +++++-
 include/aidge/operator/Producer.hpp      | 6 +++++-
 include/aidge/operator/ReLU.hpp          | 6 +++++-
 include/aidge/operator/ReduceMean.hpp    | 6 +++++-
 include/aidge/operator/Reshape.hpp       | 6 +++++-
 include/aidge/operator/Scaling.hpp       | 2 +-
 include/aidge/operator/Slice.hpp         | 7 +++++--
 include/aidge/operator/Softmax.hpp       | 6 +++++-
 include/aidge/operator/Sqrt.hpp          | 6 +++++-
 include/aidge/operator/Sub.hpp           | 6 +++++-
 include/aidge/operator/Transpose.hpp     | 6 +++++-
 24 files changed, 117 insertions(+), 27 deletions(-)

diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 58ff87cf7..3115cedca 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -47,7 +47,11 @@ public:
     Add_Op(const Add_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Add_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Add_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index d6b4fdde5..e4714111a 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -60,7 +60,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 7d57a9033..83ad2dbbb 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -54,7 +54,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 9f237b063..2f398ae71 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -55,7 +55,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Concat_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Concat_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 54310c021..690af2939 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -65,7 +65,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 432623d30..d1164468f 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -67,7 +67,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index b998e9ee2..be654a3c0 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -40,7 +40,11 @@ public:
     Div_Op(const Div_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Div_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Div_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index 895d58a87..5a92b5dc4 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -40,7 +40,11 @@ public:
     Erf_Op(const Erf_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Erf_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Erf_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 61735b99a..93e5c869d 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -57,7 +57,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<FC_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(FC_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 1e5957e83..142f6582a 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -58,7 +58,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Gather_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Gather_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index d0fd2733a..c48b85b4a 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -54,7 +54,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(LeakyReLU_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -68,7 +72,6 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
         SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 6c4065f3f..c6a08cb8e 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -64,7 +64,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index 62fb98973..2db7d49af 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -39,7 +39,11 @@ public:
     Move_Op(const Move_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Move_Op>::create({mInputs[0]->getImpl()->backend(), mOutputs[0]->getImpl()->backend()})(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Move_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -72,4 +76,4 @@ inline std::shared_ptr<Node> Move(const std::string& name = "") {
 }
 }
 
-#endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index e8894d1a2..ec4eebf9d 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -40,7 +40,11 @@ public:
     Pow_Op(const Pow_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Pow_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Pow_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index e31d8c7d2..5a2266074 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -65,7 +65,11 @@ public:
         for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
             mOutputs[i] = std::make_shared<Tensor>(*(op.getOutput(i)));
         }
-        mImpl = op.mImpl ? Registrar<Producer_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Producer_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 7a5144f48..5b8f5c4b8 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -39,7 +39,11 @@ public:
     ReLU_Op(const ReLU_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(ReLU_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 70fe03635..09f1d5835 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -57,7 +57,11 @@ class ReduceMean_Op : public OperatorTensor,
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<ReduceMean_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 410f55e5b..8914bbc9a 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -53,7 +53,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Reshape_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Reshape_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 54f1d98d2..1bc84b690 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -95,4 +95,4 @@ const char* const EnumStrings<Aidge::ScalingAttr>::data[]
     = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
\ No newline at end of file
+#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 3635eb32c..363c3c2b4 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -55,8 +55,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Slice_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this)
-                         : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Slice_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
 public:
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index a63827a85..943f69a58 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -55,7 +55,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Softmax_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Softmax_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index 69a1ffba9..dd3fa541b 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -45,7 +45,11 @@ public:
     Sqrt_Op(const Sqrt_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Sqrt_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Sqrt_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index 721b68a44..5683a9be5 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -45,7 +45,11 @@ public:
     Sub_Op(const Sub_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Sub_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Sub_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index f081f830a..b040fc907 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -56,7 +56,11 @@ class Transpose_Op : public OperatorTensor,
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Transpose_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl){
+            SET_IMPL_MACRO(Transpose_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        }else{
+            mImpl = nullptr;
+        }
     }
 
     /**
-- 
GitLab