diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad7ee666ebb56941cdc426220cd117a0e3f8b8d1
--- /dev/null
+++ b/aidge_core/unit_tests/test_impl.py
@@ -0,0 +1,72 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+from functools import reduce
+
+import numpy as np
+
+GLOBAL_CPT = 0
+
+class testImpl(aidge_core.OperatorImpl):
+    def __init__(self, op: aidge_core.Operator):
+        aidge_core.OperatorImpl.__init__(self, op) # Required to avoid a type error!
+
+    def forward(self):
+        global GLOBAL_CPT
+        GLOBAL_CPT += 1
+
+class test_OperatorImpl(unittest.TestCase):
+    """Test OperatorImpl
+    """
+    def setUp(self):
+        global GLOBAL_CPT
+        GLOBAL_CPT = 0
+    def tearDown(self):
+        pass
+
+    def test_setImplementation(self):
+        """Test setting an implementation manually
+        """
+        global GLOBAL_CPT
+        matmul = aidge_core.GenericOperator("MatMul", 1, 0, 1, name="MatMul0")
+        generic_matmul_op = matmul.get_operator()
+        generic_matmul_op.set_compute_output_dims(lambda x: x)
+        generic_matmul_op.set_impl(testImpl(generic_matmul_op))
+        generic_matmul_op.forward()
+        self.assertEqual(GLOBAL_CPT, 1)
+
+    def test_Registrar_setOp(self):
+        """Test registering an implementation and setting the backend on an operator
+        """
+        global GLOBAL_CPT
+        aidge_core.register_ConvOp2D("cpu", testImpl)
+        self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
+        conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
+        conv.get_operator().set_backend("cpu")
+        conv.get_operator().forward()
+        self.assertEqual(GLOBAL_CPT, 1)
+
+    def test_Registrar_setGraphView(self):
+        """Test registering an implementation and setting the backend through a GraphView
+        """
+        global GLOBAL_CPT
+        aidge_core.register_ConvOp2D("cpu", testImpl)
+        aidge_core.register_ProducerOp("cpu", testImpl)
+        self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
+        conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
+        model = aidge_core.sequential([conv])
+        model.set_backend("cpu")
+        conv.get_operator().forward()
+        self.assertEqual(GLOBAL_CPT, 1)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 97a4ef69bd371e80c4e63303feac5e64197670b3..3115cedca1f2a3bcc4a1330b96e90669bf7611a2 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -28,7 +28,7 @@
 namespace Aidge {
 
 class Add_Op : public OperatorTensor,
-    public Registrable<Add_Op, std::string, std::unique_ptr<OperatorImpl>(const Add_Op&)> {
+    public Registrable<Add_Op, std::string, std::shared_ptr<OperatorImpl>(const Add_Op&)> {
 public:
     static const std::string Type;
 
@@ -47,7 +47,11 @@ public:
     Add_Op(const Add_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Add_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Add_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -71,7 +75,7 @@ public:
     void computeOutputDims() override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Add_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Add_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
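Every operator header below repeats the pattern introduced here for Add_Op: the Registrable functor type moves from std::unique_ptr<OperatorImpl> to std::shared_ptr<OperatorImpl>, and both the copy constructor and setBackend() go through SET_IMPL_MACRO. In a build without the Python bindings, the macro reduces to a guarded registry lookup; a minimal sketch of the expansion inside Add_Op::setBackend (taken from the non-PYBIND branch of the Registrar.hpp hunk further down):

    if (Registrar<Add_Op>::exists(name)) {
        (*this).setImpl(Registrar<Add_Op>::create(name)(*this));
    }

Unlike the replaced code, which called Registrar<Add_Op>::create(name) unconditionally, the macro leaves mImpl untouched when no implementation is registered for the requested backend.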
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 5066cb78f86bfc87d33fce4ecd8f302c40cb14d2..e427aac72ad3948d0d03f588c930cfccedfb1885 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -30,7 +30,7 @@ enum class AvgPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
 class AvgPooling_Op : public OperatorTensor,
-                public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
+                public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
                 public StaticAttributes<AvgPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>> {
@@ -60,7 +60,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -137,7 +141,7 @@ public:
 
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -177,4 +181,4 @@ const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
                                                           "KernelDims"};
 }
 
-#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 4a0f40c034c7738a33eb8a9569fac4aa2fff465d..83ad2dbbb695e42c11cb794c7d5bd4578056d941 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -30,7 +30,7 @@ enum class BatchNormAttr { Epsilon, Momentum };
 
 template <DimIdx_t DIM>
 class BatchNorm_Op : public OperatorTensor,
-                public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
+                public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
                 public StaticAttributes<BatchNormAttr, float, float> {
 public:
     static const std::string Type;
@@ -54,7 +54,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -95,7 +99,7 @@ public:
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
 
         // By default, automatically set backend for scale, shift, mean and variance
@@ -136,4 +140,4 @@ template <>
 const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
 }
 
-#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
+#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 62a9540105d77866167d87b9733ed473e03f0151..450c40bd210e0a4be891e436f03330a984e221be 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -29,7 +29,7 @@ namespace Aidge {
 enum class ConcatAttr { Axis };
 
 class Concat_Op : public OperatorTensor,
-    public Registrable<Concat_Op, std::string, std::unique_ptr<OperatorImpl>(const Concat_Op&)>,
+    public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)>,
     public StaticAttributes<ConcatAttr, DimSize_t> {
 public:
     static const std::string Type;
@@ -55,7 +55,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Concat_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Concat_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -108,7 +112,7 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Concat_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Concat_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 8290fb3d0d978e9af3291809c5057406424096d5..82cd5df8e24457bd9f5e07c89826904c7d2283ad 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -23,7 +23,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
-#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -31,7 +31,7 @@ enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelD
 
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
-                public Registrable<Conv_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
+                public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
                 public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
                                        DimSize_t, std::array<DimSize_t, DIM>> {
 
@@ -65,7 +65,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -174,7 +178,7 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> co
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(Conv_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
 
         // By default, automatically set backend for weight and bias inputs
@@ -245,4 +249,4 @@ const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
 };
 }
 
-#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index a3b537ba60d03209e078dc94348f001603d2f3f5..7fa9124d4c750cee53d9c4a402a2fa6196ac8158 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -30,7 +30,7 @@ enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
-                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
+                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
                 public StaticAttributes<ConvDepthWiseAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
@@ -67,7 +67,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -168,7 +172,7 @@ public:
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
 
         // By default, automatically set backend for weight and bias inputs
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index a033c6920a374003ad869bddbf5641c48fc5f6e2..be654a3c015e5810892c1e23f08cc1f4b83b2d93 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 
 class Div_Op : public OperatorTensor,
-    public Registrable<Div_Op, std::string, std::unique_ptr<OperatorImpl>(const Div_Op&)> {
+    public Registrable<Div_Op, std::string, std::shared_ptr<OperatorImpl>(const Div_Op&)> {
 
 public:
     static const std::string Type;
@@ -40,7 +40,11 @@ public:
     Div_Op(const Div_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Div_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Div_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -55,7 +59,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Div_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Div_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index 6995cea5e4af9a17cf3d24516d9840850e701669..5a92b5dc45b6a090be0d9306dbfc21b1c0ae6edb 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -27,7 +27,7 @@
 namespace Aidge {
 
 class Erf_Op : public OperatorTensor,
-    public Registrable<Erf_Op, std::string, std::unique_ptr<OperatorImpl>(const Erf_Op&)> {
+    public Registrable<Erf_Op, std::string, std::shared_ptr<OperatorImpl>(const Erf_Op&)> {
 public:
     static const std::string Type;
 
@@ -40,7 +40,11 @@ public:
     Erf_Op(const Erf_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Erf_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Erf_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -52,7 +56,7 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Erf_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Erf_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index f6d81b5781dd25c990f496fa9f592502c9705eba..c111e38b00e69c8d0aecd9df0023f07a47a3865d 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -32,7 +32,7 @@ enum class FCAttr { OutChannels, NoBias };
 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
-                                 std::unique_ptr<OperatorImpl>(const FC_Op &)>,
+                                 std::shared_ptr<OperatorImpl>(const FC_Op &)>,
               public StaticAttributes<FCAttr, DimSize_t, bool> {
 public:
     static const std::string Type;
@@ -57,7 +57,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<FC_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(FC_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -97,7 +101,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<FC_Op>::create(name)(*this);
+        SET_IMPL_MACRO(FC_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
 
         // By default, automatically set backend for weight and bias inputs
@@ -128,4 +132,4 @@ const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
                                                         "NoBias"};
 }
 
-#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index f6647f99151304d0cf083aed109cc642c9f1ecc2..142f6582a3afbc85ccd951fcfeff2a924a35e718 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -32,7 +32,7 @@ enum class GatherAttr { Indices, GatheredShape, Axis };
 class Gather_Op : public OperatorTensor,
                 public Registrable<Gather_Op,
                                    std::string,
-                                   std::unique_ptr<OperatorImpl>(const Gather_Op&)>,
+                                   std::shared_ptr<OperatorImpl>(const Gather_Op&)>,
                 public StaticAttributes<GatherAttr, std::vector<std::int64_t>, std::vector<DimSize_t>, std::int64_t> {
 
 public:
@@ -58,7 +58,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Gather_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Gather_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -72,7 +76,7 @@ public:
     void computeOutputDims() override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Gather_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Gather_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index c315e671c2f084af869e3b21107066137496366b..20b0cdc4aa8a42043c37851ef110427a561e5e1d 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -110,8 +110,8 @@ public:
  * @brief Fictive custom operator not associated with any implementation.
  * Allows to import unknown operators and simulate new ones.
  * @param type Type of the fictive operator.
- * @param nbDataIn Number of input data.
- * @param nbIn Number input data + number of learnt parameters.
+ * @param nbData Number of input data.
+ * @param nbParam Number of parameters.
  * @param nbOut Number of output data.
  * @param name (optional) name of the Operator.
  * @return std::shared_ptr<Node> Node associated with the Generic Operator.
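With the corrected parameter names, a call site reads directly off the documentation; a minimal sketch mirroring the Python test at the top of this patch (assuming the C++ factory signature matches the documented parameters):

    // Type "MatMul", 1 data input, 0 parameters, 1 output, node named "MatMul0".
    std::shared_ptr<Node> matmul = GenericOperator("MatMul", 1, 0, 1, "MatMul0");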
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 5976f1d88d70ae7fb716f4038e57da95242c3551..c48b85b4a7af71fde0f8136732597e098c966839 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -30,7 +30,7 @@ enum class LeakyReLUAttr {
 };
 
 class LeakyReLU_Op : public OperatorTensor,
-    public Registrable<LeakyReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
+    public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
     public StaticAttributes<LeakyReLUAttr, float> {
 public:
     static const std::string Type;
@@ -54,7 +54,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(LeakyReLU_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -68,7 +72,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
+        SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index a011c8666bba55eb7254a8efcd432a3f680cd461..596aa634693941d8e3a23ac955281cfd131e56ef 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -27,7 +27,7 @@ namespace Aidge {
 class MatMul_Op : public OperatorTensor,
               public Registrable<MatMul_Op,
                                  std::string,
-                                 std::unique_ptr<OperatorImpl>(const MatMul_Op &)> {
+                                 std::shared_ptr<OperatorImpl>(const MatMul_Op &)> {
 public:
     static const std::string Type;
 
@@ -65,7 +65,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final {
-        mImpl = Registrar<MatMul_Op>::create(name)(*this);
+        SET_IMPL_MACRO(MatMul_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index b07fa38a41c664c4fcbf90227914264ec68390a0..06ac30158f80a946b9310a93c8f81cc3ee975c84 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -30,7 +30,7 @@ enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public OperatorTensor,
-                public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
+                public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
                 public StaticAttributes<MaxPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
@@ -64,7 +64,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -105,7 +109,7 @@ public:
 
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index 62fb9897384673c695895b54557b4cf637aa2447..3652cf9697c6bcfea4befe4cdcdf5b9efff8b70c 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -72,4 +72,4 @@ inline std::shared_ptr<Node> Move(const std::string& name = "") {
 }
 }
 
-#endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 8758021a9c3de1707a96bbfafc21686ded8b7e40..75304078829475b1488640dc39aeee8b64f1c3e5 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -29,7 +29,7 @@ namespace Aidge {
  * @brief Tensor element-wise multiplication.
  */
 class Mul_Op : public OperatorTensor,
-    public Registrable<Mul_Op, std::string, std::unique_ptr<OperatorImpl>(const Mul_Op&)> {
+    public Registrable<Mul_Op, std::string, std::shared_ptr<OperatorImpl>(const Mul_Op&)> {
 public:
     static const std::string Type;
 
@@ -57,7 +57,7 @@ public:
     void computeOutputDims() override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Mul_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Mul_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -74,4 +74,4 @@ inline std::shared_ptr<Node> Mul(const std::string& name = "") {
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index a0d2292b7860baa60fe537698784d4d250c81f42..396c60e46127ee9312745a92f9112dbc0742a584 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -115,15 +115,21 @@ public:
     virtual void setDataType(const DataType& dataType) const = 0;
 
     /**
-     * @brief Set the a new OperatorImpl to the Operator
+     * @brief Set a new OperatorImpl for the Operator
      *
      */
     inline void setImpl(std::shared_ptr<OperatorImpl> impl) { mImpl = impl; }
 
     /**
-     * @brief Minimum amount of data from a specific input required by the
-     * implementation to be run.
+     * @brief Get the OperatorImpl of the Operator
      *
+     */
+    inline std::shared_ptr<OperatorImpl> getImpl() const noexcept {
+        return mImpl;
+    }
+
+    /**
+     * @brief Minimum amount of data from a specific input for one computation pass.
      * @param inputIdx Index of the input analysed.
      * @return NbElts_t
      */
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index bb961295bfaad2999af01460c49833085ff50a92..dce2a6e9e5ea9e0c5fe9a841c587c1f7bbe36fc7 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -31,7 +31,7 @@ enum class PadBorderType { Constant, Edge, Reflect, Wrap };
 
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
-                public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
+                public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
                 public StaticAttributes<PadAttr,
                                        std::array<DimSize_t, 2*DIM>,
                                        PadBorderType,
@@ -98,7 +98,7 @@ public:
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(Pad_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index ba8d3d05877f9aa543518fff1d88f4e8a436b712..ec4eebf9ddba475310ba292dd5923ba50933545d 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -27,7 +27,7 @@
 namespace Aidge {
 
 class Pow_Op : public OperatorTensor,
-    public Registrable<Pow_Op, std::string, std::unique_ptr<OperatorImpl>(const Pow_Op&)> {
+    public Registrable<Pow_Op, std::string, std::shared_ptr<OperatorImpl>(const Pow_Op&)> {
 public:
     static const std::string Type;
 
@@ -40,7 +40,11 @@ public:
     Pow_Op(const Pow_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Pow_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Pow_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -55,7 +59,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Pow_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Pow_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -72,4 +76,4 @@ inline std::shared_ptr<Node> Pow(const std::string& name = "") {
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 0731498dd3e06541ed82a86a98c2ae0bb355f413..c9b1f6e4aa5d82006d4bed880151ac1a22a4882b 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -28,7 +28,7 @@ enum class ProdAttr { Constant };
 
 class Producer_Op
     : public OperatorTensor,
-      public Registrable<Producer_Op, std::string, std::unique_ptr<OperatorImpl>(
+      public Registrable<Producer_Op, std::string, std::shared_ptr<OperatorImpl>(
                                           const Producer_Op &)>,
       public StaticAttributes<ProdAttr, bool> {
 public:
@@ -67,9 +67,11 @@ public:
         for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
             mOutputs[i] = std::make_shared<Tensor>(*(op.getOutput(i)));
         }
-        mImpl = (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}))
-            ? Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this)
-            : std::make_shared<OperatorImpl>(*this);
+        if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})) {
+            SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = std::make_shared<OperatorImpl>(*this);
+        }
     }
 
     /**
@@ -92,9 +94,7 @@ public:
     inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        if (Registrar<Producer_Op>::exists({name})) {
-            mImpl = Registrar<Producer_Op>::create({name})(*this);
-        }
+        SET_IMPL_MACRO(Producer_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 0bb7cdffe421b973ae7c86b4569e7464b3cf6da4..5b8f5c4b819f9a2f8cf518bdc50c445fbce38102 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 
 class ReLU_Op : public OperatorTensor,
-    public Registrable<ReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const ReLU_Op&)> {
+    public Registrable<ReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const ReLU_Op&)> {
 public:
     static const std::string Type;
 
@@ -39,7 +39,11 @@ public:
     ReLU_Op(const ReLU_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(ReLU_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -52,7 +56,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<ReLU_Op>::create(name)(*this);
+        SET_IMPL_MACRO(ReLU_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -69,4 +73,4 @@ inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
 }
 }
 
-#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 5f07cddfa667e7e494defe38a5667332744c3e20..09f1d58359b265af58fd78ef8de54dd1944b5cf1 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -32,7 +32,7 @@ enum class ReduceMeanAttr { Axes, KeepDims };
 
 template <DimIdx_t DIM>
 class ReduceMean_Op : public OperatorTensor,
-                public Registrable<ReduceMean_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
+                public Registrable<ReduceMean_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
                 public StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t> {
 
    public:
@@ -57,7 +57,11 @@ class ReduceMean_Op : public OperatorTensor,
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<ReduceMean_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -99,7 +103,7 @@ class ReduceMean_Op : public OperatorTensor,
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<ReduceMean_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 32d71d5adc3cfd92c9840dcb5bc61bfb6399c6db..8914bbc9a9f3748276ead32aba8cb023ba14b1b7 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
 enum class ReshapeAttr { Shape };
 
 class Reshape_Op : public OperatorTensor,
-                   public Registrable<Reshape_Op, std::string, std::unique_ptr<OperatorImpl>(const Reshape_Op&)>,
+                   public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)>,
                    public StaticAttributes<ReshapeAttr, std::vector<std::int64_t>> {
 
 public:
@@ -53,7 +53,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Reshape_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Reshape_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -67,7 +71,7 @@ public:
     void computeOutputDims() override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Reshape_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Reshape_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 54f1d98d2f61d18dd821c9f0a6b574bb52b0c9f0..29ce0527a9b8b15c7b45c0b0241a83957abb5565 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -55,7 +55,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Scaling_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Scaling_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -95,4 +99,4 @@ const char* const EnumStrings<Aidge::ScalingAttr>::data[]
     = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
\ No newline at end of file
+#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 4a073bc525640846c28d718d09741a67d499830e..363c3c2b4ec397fdd62dc3260b63a0cd6d6c0081 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -28,7 +28,7 @@ enum class SliceAttr { Starts, Ends, Axes };
 
 class Slice_Op
     : public OperatorTensor,
-      public Registrable<Slice_Op, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op &)>,
+      public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)>,
       public StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int64_t>> {
 public:
     static const std::string Type;
@@ -55,8 +55,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Slice_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this)
-                         : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Slice_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
 public:
@@ -69,7 +72,7 @@ public:
     void computeOutputDims() override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Slice_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Slice_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index ed6689dc97ef17276df260cd90649f2a75b10007..943f69a588ebfedf28ec5ebb3a782e7510fa710a 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -33,7 +33,7 @@ enum class SoftmaxAttr { AxisIdx };
 class Softmax_Op : public OperatorTensor,
                 public Registrable<Softmax_Op,
                                    std::string,
-                                   std::unique_ptr<OperatorImpl>(const Softmax_Op&)>,
+                                   std::shared_ptr<OperatorImpl>(const Softmax_Op&)>,
                 public StaticAttributes<SoftmaxAttr, int> {
 
 public:
@@ -55,7 +55,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Softmax_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Softmax_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -67,7 +71,7 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Softmax_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Softmax_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index 32adfdb93db1e9da857f4147efdcfe64bbb34475..dd3fa541b9fd5177ddd3b9e8bcd781c0ea3a1867 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -27,7 +27,7 @@
 namespace Aidge {
 
 class Sqrt_Op : public OperatorTensor,
-    public Registrable<Sqrt_Op, std::string, std::unique_ptr<OperatorImpl>(const Sqrt_Op&)> {
+    public Registrable<Sqrt_Op, std::string, std::shared_ptr<OperatorImpl>(const Sqrt_Op&)> {
 public:
     // FIXME: change accessibility
     std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -45,7 +45,11 @@ public:
     Sqrt_Op(const Sqrt_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Sqrt_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Sqrt_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -57,7 +61,7 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Sqrt_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Sqrt_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index 7d346457ead71724ba05da70b5bdf7ad145cbe0c..5683a9be5ea2278d92fe7da081f0c4a80ff9500d 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -27,7 +27,7 @@
 namespace Aidge {
 
 class Sub_Op : public OperatorTensor,
-    public Registrable<Sub_Op, std::string, std::unique_ptr<OperatorImpl>(const Sub_Op&)> {
+    public Registrable<Sub_Op, std::string, std::shared_ptr<OperatorImpl>(const Sub_Op&)> {
 public:
     // FIXME: change accessibility
     std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
@@ -45,7 +45,11 @@ public:
     Sub_Op(const Sub_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Sub_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Sub_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -60,7 +64,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Sub_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Sub_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -77,4 +81,4 @@ inline std::shared_ptr<Node> Sub(const std::string& name = "") {
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 2262bec14bd2f00cda643ade0709f7f9d509fa22..b040fc907dd5ac1f40a8a1885d27364785ba9188 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -30,7 +30,7 @@ enum class TransposeAttr { OutputDimsOrder };
 
 template <DimIdx_t DIM>
 class Transpose_Op : public OperatorTensor,
-                public Registrable<Transpose_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
+                public Registrable<Transpose_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
                 public StaticAttributes<TransposeAttr,
                                        std::array<DimSize_t, DIM>> {
 
@@ -56,7 +56,11 @@ class Transpose_Op : public OperatorTensor,
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Transpose_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Transpose_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -80,7 +84,7 @@ class Transpose_Op : public OperatorTensor,
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Transpose_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(Transpose_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 4d604d520d3d8af532e196c7785896ddc1c242d0..a5bd260ec189ac998134b738ca1ae757f2a0038c 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -14,6 +14,9 @@
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>        // declare_registrable keys may require STL conversions
+#include <pybind11/functional.h> // declare_registrable allows binding lambda functions
+
 #endif
 
 #include "aidge/utils/ErrorHandling.hpp"
@@ -27,6 +30,9 @@ namespace Aidge {
 namespace py = pybind11;
 #endif
 
+// Abstract class used to test if a class is Registrable.
+class AbstractRegistrable {};
+
 template <class DerivedClass, class Key, class Func> // curiously recurring template pattern
 class Registrable {
 public:
@@ -58,8 +64,10 @@ struct Registrar {
 
     Registrar(const registrar_key& key, registrar_type func) {
         //fmt::print("REGISTRAR: {}\n", key);
-        bool newInsert;
-        std::tie(std::ignore, newInsert) = C::registry().insert(std::make_pair(key, func));
+        // Overwrite any previous registration for this key so that a newer
+        // implementation (e.g. one registered from Python) can replace it.
+        C::registry().erase(key);
+        C::registry().insert(std::make_pair(key, func));
         //assert(newInsert && "registrar already exists");
     }
 
@@ -81,6 +89,62 @@ struct Registrar {
         return keys;
     }
 };
+
+#ifdef PYBIND
+/**
+ * @brief Defines the register_<class_name> and get_keys_<class_name> Python
+ * functions for a registrable class. Defined here so that every module that
+ * wants to expose a new registrable class has access to it.
+ *
+ * @tparam C registrable class
+ * @param m pybind module
+ * @param class_name python name of the class
+ */
+template <class C>
+void declare_registrable(py::module& m, const std::string& class_name){
+    typedef typename C::registrar_key registrar_key;
+    typedef typename C::registrar_type registrar_type;
+    m.def(("register_"+ class_name).c_str(), [](registrar_key& key, registrar_type function){
+        Registrar<C>(key, function);
+    })
+    .def(("get_keys_"+ class_name).c_str(), [](){
+        return Registrar<C>::getKeys();
+    });
+}
+#endif
+
+/*
+* This macro allows setting an implementation on an operator.
+* It is mandatory for using implementations registered from Python: when the
+* create method returns a Python function, pybind11 calls the copy ctor if
+* op is not visible to the Python world.
+* See this issue for more information: https://github.com/pybind/pybind11/issues/4417
+* Note: doing this with a function is not possible, as any call to a function
+* would invoke the copy ctor. This is why a macro is used.
+* Note: the line
+*             (op).setImpl(Registrar<T_Op>::create(backend_name)(op)); \
+* is duplicated because the py::cast needs to be done in the same scope;
+* this has only been verified empirically.
+*
+* If someone wants to find an alternative to this macro, you can contact me:
+*   cyril.moineau@cea.fr
+*/
+#ifdef PYBIND
+#define SET_IMPL_MACRO(T_Op, op, backend_name)                               \
+        if (Py_IsInitialized()) {                                            \
+            /* cast in this scope so the Python object stays alive */        \
+            auto obj = py::cast(&(op));                                      \
+            (op).setImpl(Registrar<T_Op>::create(backend_name)(op));         \
+        } else {                                                             \
+            (op).setImpl(Registrar<T_Op>::create(backend_name)(op));         \
+        }
+#else
+#define SET_IMPL_MACRO(T_Op, op, backend_name)                          \
+    if (Registrar<T_Op>::exists(backend_name)) {                        \
+        (op).setImpl(Registrar<T_Op>::create(backend_name)(op));        \
+    }
+#endif
+
 }
 
 #endif //AIDGE_CORE_UTILS_REGISTRAR_H_
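For reference, the C++ registration that declare_registrable mirrors on the Python side; a minimal sketch for Add_Op (MyAddImpl is hypothetical), using the std::shared_ptr<OperatorImpl>(const Add_Op&) functor type the Registrable base now expects:

    namespace {
    Registrar<Add_Op> registrarAddCpu(
        "cpu",
        [](const Add_Op& op) -> std::shared_ptr<OperatorImpl> {
            return std::make_shared<MyAddImpl>(op);
        });
    }

Because the Registrar constructor now erases any previous entry for its key, a later registration under "cpu" (for example one made from Python through the generated register_AddOp function) simply replaces this one.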
diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp
index a2a5e6b8bb2d0f2413ef94c360b383608c5b41b5..91d65484a122d6a651758e16eb0e925b6e0bfdd0 100644
--- a/python_binding/backend/pybind_OperatorImpl.cpp
+++ b/python_binding/backend/pybind_OperatorImpl.cpp
@@ -116,7 +116,7 @@ public:
 void init_OperatorImpl(py::module& m){
 
     py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr())
-    .def(py::init<const Operator&>())
+    .def(py::init<const Operator&>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>())
     .def("forward", &OperatorImpl::forward)
     .def("backward", &OperatorImpl::backward)
     .def("get_nb_required_data", &OperatorImpl::getNbRequiredData)
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 74ec11c28e746856fe767f16a4380651271d8fe4..661c96bb835fa3ac719ab10dbf83e4137f1bb248 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -23,7 +23,7 @@ void declare_Add(py::module &m) {
   py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
   .def("get_inputs_name", &Add_Op::getInputsName)
   .def("get_outputs_name", &Add_Op::getOutputsName);
-
+  declare_registrable<Add_Op>(m, "AddOp");
   m.def("Add", &Add, py::arg("nbIn"), py::arg("name") = "");
 }
 
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 0ca01c07535f65ac1161603d32d191881eb28746..c44c7b49ade1e47438f80f0b3f3a83c18eb4e0fa 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -26,8 +26,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
+  const std::string pyClassName("AvgPoolingOp" + std::to_string(DIM) + "D");
   py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>(
-    m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
+    m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &>(),
@@ -36,7 +37,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
   .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
   .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
   .def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName);
-
+  declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
   m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims) {
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index e11fc288fb9eb837c0a7b36c0a1c4024ab6c8633..7020c35f63880e77ecd3c2011a1b3c74bed847ed 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -21,13 +21,12 @@ namespace Aidge {
 
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
-    .def(py::init<float, float>(),
-            py::arg("epsilon"),
-            py::arg("momentum"))
+    const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D");
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, pyClassName.c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
     .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
     .def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
+    declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
 
     m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nbFeatures"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 8cdd138b8cde2a582e9f569a17ae33811637092c..38d8a20cba1eafc255b1da313d35ad8be116620d 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -24,6 +24,7 @@ void init_Concat(py::module& m) {
     .def("get_outputs_name", &Concat_Op::getOutputsName)
     .def("attributes_name", &Concat_Op::staticGetAttrsName);
 
+    declare_registrable<Concat_Op>(m, "ConcatOp");
     m.def("Concat", &Concat, py::arg("nbIn"), py::arg("axis"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 346acc5d9d05c24e9538c3b8c5edf1f7e37d6ba8..aea402017622655a577ac4f9e207141bff01d70d 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -19,13 +19,15 @@
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/Registrar.hpp" // declare_registrable
 
 namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
+  const std::string pyClassName("ConvOp" + std::to_string(DIM) + "D");
   py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Attributes, OperatorTensor>(
-    m, ("ConvOp" + std::to_string(DIM) + "D").c_str(),
+    m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<DimSize_t,
                 DimSize_t,
@@ -41,6 +43,8 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
     .def("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
     .def("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
     ;
+  declare_registrable<Conv_Op<DIM>>(m, pyClassName);
+
 
   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
                                                          DimSize_t out_channels,
@@ -66,9 +70,5 @@ void init_Conv(py::module &m) {
   declare_ConvOp<1>(m);
   declare_ConvOp<2>(m);
   declare_ConvOp<3>(m);
-
-  // FIXME:
-  // m.def("Conv1D", static_cast<NodeAPI(*)(const char*, int, int, int const
-  // (&)[1])>(&Conv));
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index e25024e09cdd4fe234416a9aa8f0fef91a3c27fe..83eac8742628bf2e0921e6a17dd46226c46fbea1 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -26,8 +26,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
+  const std::string pyClassName("ConvDepthWiseOp" + std::to_string(DIM) + "D");
   py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
-    m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
+    m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const DimSize_t,
                 const std::array<DimSize_t, DIM> &,
@@ -40,7 +41,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
   .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
   .def("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName);
-
+  declare_registrable<ConvDepthWise_Op<DIM>>(m, pyClassName);
   m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
                                                                   const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
index 6d14510f34349c001289096a7fc9b08681a25bc8..2996e0bcae6d69d9ad2ef0d4d8eee8489cd8cdc8 100644
--- a/python_binding/operator/pybind_Div.cpp
+++ b/python_binding/operator/pybind_Div.cpp
@@ -21,7 +21,7 @@ void init_Div(py::module& m) {
     py::class_<Div_Op, std::shared_ptr<Div_Op>, OperatorTensor>(m, "DivOp", py::multiple_inheritance())
     .def("get_inputs_name", &Div_Op::getInputsName)
     .def("get_outputs_name", &Div_Op::getOutputsName);
-
+    declare_registrable<Div_Op>(m, "DivOp");
     m.def("Div", &Div, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp
index 806867f61c3580543c184d529edc2856ee8d7a6c..e1aef08ad597d92c4cf4b6d5a2cff487e438538e 100644
--- a/python_binding/operator/pybind_Erf.cpp
+++ b/python_binding/operator/pybind_Erf.cpp
@@ -21,7 +21,7 @@ void init_Erf(py::module& m) {
     py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(m, "ErfOp", py::multiple_inheritance())
     .def("get_inputs_name", &Erf_Op::getInputsName)
     .def("get_outputs_name", &Erf_Op::getOutputsName);
-
+    declare_registrable<Erf_Op>(m, "ErfOp");
     m.def("Erf", &Erf, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index ad589d73d0aea94d96e62e8065b70bd517633f88..0b13643cbd3ebb265dab62a1030729fca62dea62 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -24,7 +24,7 @@ void declare_FC(py::module &m) {
   .def("get_inputs_name", &FC_Op::getInputsName)
   .def("get_outputs_name", &FC_Op::getOutputsName)
   .def("attributes_name", &FC_Op::staticGetAttrsName);
-
+  declare_registrable<FC_Op>(m, "FCOp");
   m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
 
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index f0d55e2f40bd89269c96564cea6b5a002b477b8b..db6bdb15a2e6288b5f775d538a5e14f15d79d2c1 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -23,7 +23,7 @@ void init_Gather(py::module& m) {
     .def("get_inputs_name", &Gather_Op::getInputsName)
     .def("get_outputs_name", &Gather_Op::getOutputsName)
     .def("attributes_name", &Gather_Op::staticGetAttrsName);
-
-    m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis"), py::arg("name") = "");
+    declare_registrable<Gather_Op>(m, "GatherOp");
+    m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis")= 0, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 3e9acb831eb3334bd126d3b360f3b5aa39d83731..66b2c34a9a558d20d90f71dd590d9fe8c370c10d 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -22,7 +22,7 @@ void init_LeakyReLU(py::module& m) {
     .def("get_inputs_name", &LeakyReLU_Op::getInputsName)
     .def("get_outputs_name", &LeakyReLU_Op::getOutputsName)
     .def("attributes_name", &LeakyReLU_Op::staticGetAttrsName);
-
+    declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index d0d7f28d52a9a9899b08d37a0c1a4a8720f2ae20..383bad54be08905c5e9248ab3f7bf5c83bddc836 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -23,7 +23,7 @@ void init_MatMul(py::module &m) {
   py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
   .def("get_inputs_name", &MatMul_Op::getInputsName)
   .def("get_outputs_name", &MatMul_Op::getOutputsName);
-
+  declare_registrable<MatMul_Op>(m, "MatMulOp");
   m.def("MatMul", &MatMul, py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 9c83a67e81120e2cc2674e3ceb4c8871dd6fd393..8a5e3db9decd01bd5fabe5897847f939e7fa02b3 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -26,6 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
+  const std::string pyClassName("MaxPoolingOp" + std::to_string(DIM) + "D");
   py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
@@ -38,7 +39,7 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
   .def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
   .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
   .def("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName);
-
+  declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName);
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
index 21f510d98728fbe5401288a366294241b5f10a3f..5354f01ca508eb6ff04304d1f4072f431339973c 100644
--- a/python_binding/operator/pybind_Mul.cpp
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -21,7 +21,7 @@ void init_Mul(py::module& m) {
     py::class_<Mul_Op, std::shared_ptr<Mul_Op>, OperatorTensor>(m, "MulOp", py::multiple_inheritance())
     .def("get_inputs_name", &Mul_Op::getInputsName)
     .def("get_outputs_name", &Mul_Op::getOutputsName);
-
+    declare_registrable<Mul_Op>(m, "MulOp");
     m.def("Mul", &Mul, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index 79a85cb92cf27c7edb745c36eefe61ae86c66786..05d6cd089754d1155e1506b4a491af7919bc4d31 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -32,10 +32,12 @@ void init_Operator(py::module& m){
     .def("set_datatype", &Operator::setDataType, py::arg("dataType"))
     .def("set_backend", &Operator::setBackend, py::arg("name"), py::arg("device") = 0)
     .def("forward", &Operator::forward)
-    // py::keep_alive forbide Python to garbage collect implementation will the Operator is not garbade collected !
+    // py::keep_alive prevents Python from garbage collecting the implementation while the Operator is still alive
     .def("set_impl", &Operator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>())
+    .def("get_impl", &Operator::getImpl)
     .def("get_hook", &Operator::getHook)
     .def("add_hook", &Operator::addHook)
     ;
 }
-}
\ No newline at end of file
+}
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 69d63fe7b8d31a6fa9747df2ce4a93ec4a0f4cac..d784a0d6ab7803bbc078b12b39df9ad8ef2f768e 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -25,8 +25,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
+  const std::string pyClassName("PadOp" + std::to_string(DIM) + "D");
   py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>(
-    m, ("PadOp" + std::to_string(DIM) + "D").c_str(),
+    m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, 2*DIM> &,
                 const PadBorderType &,
@@ -38,7 +39,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
     .def("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
     .def("attributes_name", &Pad_Op<DIM>::staticGetAttrsName)
     ;
-
+  declare_registrable<Pad_Op<DIM>>(m, pyClassName);
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
                                                         const std::string& name,
                                                         const PadBorderType &borderType = PadBorderType::Constant,
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
index 09d1e4ad2ad6413901c28bc9d9fe16995483da05..03e822adbd326b6ad9693d58b53cd9f8f4bc3ac8 100644
--- a/python_binding/operator/pybind_Pow.cpp
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -21,6 +21,7 @@ void init_Pow(py::module& m) {
     py::class_<Pow_Op, std::shared_ptr<Pow_Op>, OperatorTensor>(m, "PowOp", py::multiple_inheritance())
     .def("get_inputs_name", &Pow_Op::getInputsName)
     .def("get_outputs_name", &Pow_Op::getOutputsName);
+    declare_registrable<Pow_Op>(m, "PowOp");
 
     m.def("Pow", &Pow, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 3caa438d18b3919dbedcf66e4ba53b92b84a50b5..025c8c5dd1651b3466a22e88f0966a7f51d2c109 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -39,7 +39,8 @@ void init_Producer(py::module &m) {
     .def("get_outputs_name", &Producer_Op::getOutputsName)
     .def("attributes_name", &Producer_Op::staticGetAttrsName);
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&, bool)>(&Producer), py::arg("tensor"), py::arg("name") = "", py::arg("constant") = false);
-
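+    // Producers need a registered implementation too, e.g. when setting a backend on a whole GraphView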
+    declare_registrable<Producer_Op>(m, "ProducerOp");
     declare_Producer<1>(m);
     declare_Producer<2>(m);
     declare_Producer<3>(m);
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index 24ae96649a87ff9acc996715d3cd00a97c393578..f08c67cb98b629b8d1b61471c6f50a0de4c421d6 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -21,6 +21,7 @@ void init_ReLU(py::module& m) {
     py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, OperatorTensor>(m, "ReLUOp", py::multiple_inheritance())
     .def("get_inputs_name", &ReLU_Op::getInputsName)
     .def("get_outputs_name", &ReLU_Op::getOutputsName);
+    declare_registrable<ReLU_Op>(m, "ReLUOp");
 
     m.def("ReLU", &ReLU, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 11e979736dcab211aa11758cb3138f9d6827cc4e..fbec6864042cf16a877faa67b351be5eb3f9b1eb 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -24,12 +24,14 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) {
+  const std::string pyClassName("ReduceMeanOp" + std::to_string(DIM) + "D");
   py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, Attributes, OperatorTensor>(
-    m, ("ReduceMeanOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
+    m, pyClassName.c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName)
     .def("get_outputs_name", &ReduceMean_Op<DIM>::getOutputsName)
     .def("attributes_name", &ReduceMean_Op<DIM>::staticGetAttrsName)
     ;
+  declare_registrable<ReduceMean_Op<DIM>>(m, pyClassName);
 
   m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), [](const std::vector<int>& axes,
                                                                 DimSize_t keepDims,
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index b3e9850a54a36e440876dace2b635a122c63b4af..dc6a9b4ec5de297df7c1c52877974ab84d55a0c2 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -21,7 +21,7 @@ void init_Reshape(py::module& m) {
     py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, Attributes, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
     .def("get_inputs_name", &Reshape_Op::getInputsName)
     .def("get_outputs_name", &Reshape_Op::getOutputsName);
-
+    declare_registrable<Reshape_Op>(m, "ReshapeOp");
     m.def("Reshape", &Reshape, py::arg("shape"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index 7bfd1b4f00579ed29658db73b71f2c596048fe75..3bb1b082c19b98447726b0fb980cbd8688fd5ba3 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -21,7 +21,7 @@ void init_Slice(py::module& m) {
     py::class_<Slice_Op, std::shared_ptr<Slice_Op>, OperatorTensor>(m, "SliceOp", py::multiple_inheritance())
     .def("get_inputs_name", &Slice_Op::getInputsName)
     .def("get_outputs_name", &Slice_Op::getOutputsName);
-
+    declare_registrable<Slice_Op>(m, "SliceOp");
     m.def("Slice", &Slice, py::arg("starts"), py::arg("ends"), py::arg("axes"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 780cffdef695b71dbc2781ba30936b3b45657cbb..bac553387a00856f2d4e01dea95e630a59666938 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -23,7 +23,7 @@ void init_Softmax(py::module& m) {
     .def("get_inputs_name", &Softmax_Op::getInputsName)
     .def("get_outputs_name", &Softmax_Op::getOutputsName)
     .def("attributes_name", &Softmax_Op::staticGetAttrsName);
-
+    declare_registrable<Softmax_Op>(m, "SoftmaxOp");
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index 98d65242e8ff199992bbfc740192ae25e6d7b738..33d46e02caee1046cbbdbaaa186c4898db5b10c1 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -21,7 +21,7 @@ void init_Sqrt(py::module& m) {
     py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, OperatorTensor>(m, "SqrtOp", py::multiple_inheritance())
     .def("get_inputs_name", &Sqrt_Op::getInputsName)
     .def("get_outputs_name", &Sqrt_Op::getOutputsName);
-
+    declare_registrable<Sqrt_Op>(m, "SqrtOp");
     m.def("Sqrt", &Sqrt, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
index dce1ab6cb27cc7da02e6c817a6bc49ec64bcf364..1b858d1527eb3969e2acad9c0206311ff2981f17 100644
--- a/python_binding/operator/pybind_Sub.cpp
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -21,7 +21,7 @@ void init_Sub(py::module& m) {
     py::class_<Sub_Op, std::shared_ptr<Sub_Op>, OperatorTensor>(m, "SubOp", py::multiple_inheritance())
     .def("get_inputs_name", &Sub_Op::getInputsName)
     .def("get_outputs_name", &Sub_Op::getOutputsName);
-
+    declare_registrable<Sub_Op>(m, "SubOp");
     m.def("Sub", &Sub, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index f5fbaf0e75ddd81265fd17e0aeb18b54f3908627..59482cf481849738ed0656d8c55188b2ade51954 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -27,12 +27,15 @@ namespace Aidge {
 
 template <DimIdx_t DIM>
 void declare_Transpose(py::module &m) {
+  const std::string pyClassName("TransposeOp" + std::to_string(DIM) + "D");
   py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
   .def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
   .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName)
   .def("attributes_name", &Transpose_Op<DIM>::staticGetAttrsName);
 
+  declare_registrable<Transpose_Op<DIM>>(m, pyClassName);
+
   m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order,
                                                                   const std::string& name) {
         AIDGE_ASSERT(output_dims_order.size() == DIM, "output_dims_order size [{}] does not match DIM [{}]", output_dims_order.size(), DIM);