diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 97a4ef69bd371e80c4e63303feac5e64197670b3..58ff87cf7d79f47ba627f50edd7bff04e9cb8918 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -28,7 +28,7 @@
 namespace Aidge {
 
 class Add_Op : public OperatorTensor,
-    public Registrable<Add_Op, std::string, std::unique_ptr<OperatorImpl>(const Add_Op&)> {
+    public Registrable<Add_Op, std::string, std::shared_ptr<OperatorImpl>(const Add_Op&)> {
 public:
     static const std::string Type;
 
@@ -71,7 +71,7 @@ public:
     void computeOutputDims() override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Add_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Add_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index a2098ff36b40b78eb12a36fe28793e8dd73d9d9c..d6b4fdde552d69d68e3fc6e1548865444dfc6fda 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -30,7 +30,7 @@ enum class AvgPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
 class AvgPooling_Op : public OperatorTensor,
-                public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
+                public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
                 public StaticAttributes<AvgPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>> {
@@ -137,7 +137,7 @@ public:
 
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -177,4 +177,4 @@ const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
                                                           "KernelDims"};
 }
 
-#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 4a0f40c034c7738a33eb8a9569fac4aa2fff465d..7d57a903327462e331ef0f14e2f09146fab11cc4 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -30,7 +30,7 @@ enum class BatchNormAttr { Epsilon, Momentum };
 
 template <DimIdx_t DIM>
 class BatchNorm_Op : public OperatorTensor,
-                public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
+                public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
                 public StaticAttributes<BatchNormAttr, float, float> {
 public:
     static const std::string Type;
@@ -95,7 +95,7 @@ public:
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
 
         // By default, automatically set backend for scale, shift, mean and variance
@@ -136,4 +136,4 @@ template <>
 const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
 }
 
-#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
+#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 06cc468bd7266bbcfeb6802f274c536ec09867fc..9f237b063a33963ec35da81647e801f2ad40f23a 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -29,7 +29,7 @@ namespace Aidge {
 enum class ConcatAttr { Axis };
 
 class Concat_Op : public OperatorTensor,
-    public Registrable<Concat_Op, std::string, std::unique_ptr<OperatorImpl>(const Concat_Op&)>,
+    public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)>,
     public StaticAttributes<ConcatAttr, DimSize_t> {
 public:
     static const std::string Type;
@@ -102,7 +102,7 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Concat_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Concat_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 9d0c0bf408a2f634f96881cd339c330340d5e344..432623d300adce23f1d0cf45f1286721c37490b1 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -30,7 +30,7 @@ enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
-                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
+                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
                 public StaticAttributes<ConvDepthWiseAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
@@ -168,7 +168,7 @@ public:
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
 
         // By default, automatically set backend for weight and bias inputs
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index a033c6920a374003ad869bddbf5641c48fc5f6e2..b998e9ee22d69da2e449fe5f7b365284d3c156e6 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 
 class Div_Op : public OperatorTensor,
-    public Registrable<Div_Op, std::string, std::unique_ptr<OperatorImpl>(const Div_Op&)> {
+    public Registrable<Div_Op, std::string, std::shared_ptr<OperatorImpl>(const Div_Op&)> {
 
 public:
     static const std::string Type;
@@ -55,7 +55,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Div_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Div_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index 6995cea5e4af9a17cf3d24516d9840850e701669..895d58a8768baf6e20ce2dd34233c57432bdbfed 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -27,7 +27,7 @@
 namespace Aidge {
 
 class Erf_Op : public OperatorTensor,
-    public Registrable<Erf_Op, std::string, std::unique_ptr<OperatorImpl>(const Erf_Op&)> {
+    public Registrable<Erf_Op, std::string, std::shared_ptr<OperatorImpl>(const Erf_Op&)> {
 public:
     static const std::string Type;
 
@@ -52,7 +52,7 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Erf_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Erf_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index a73734ad20e10fe2a3e1d0d12d40e584b4540fb4..61735b99a89975a6b70ecedab41e5ef9b1bed2a2 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -32,7 +32,7 @@ enum class FCAttr { OutChannels, NoBias };
 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
-                                 std::unique_ptr<OperatorImpl>(const FC_Op &)>,
+                                 std::shared_ptr<OperatorImpl>(const FC_Op &)>,
               public StaticAttributes<FCAttr, DimSize_t, bool> {
 public:
     static const std::string Type;
@@ -96,7 +96,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<FC_Op>::create(name)(*this);
+        SET_IMPL_MACRO(FC_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
 
         // By default, automatically set backend for weight and bias inputs
@@ -127,4 +127,4 @@ const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
                                                         "NoBias"};
 }
 
-#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index f6647f99151304d0cf083aed109cc642c9f1ecc2..1e5957e8360004abddf82a71a46ba1bfbc58a1ef 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -32,7 +32,7 @@ enum class GatherAttr { Indices, GatheredShape, Axis };
 class Gather_Op : public OperatorTensor,
                 public Registrable<Gather_Op,
                                    std::string,
-                                   std::unique_ptr<OperatorImpl>(const Gather_Op&)>,
+                                   std::shared_ptr<OperatorImpl>(const Gather_Op&)>,
                 public StaticAttributes<GatherAttr, std::vector<std::int64_t>, std::vector<DimSize_t>, std::int64_t> {
 
 public:
@@ -72,7 +72,7 @@ public:
     void computeOutputDims() override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Gather_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Gather_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 5976f1d88d70ae7fb716f4038e57da95242c3551..d0fd2733ac49744046248ddd7f138a791a1563a1 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -30,7 +30,7 @@ enum class LeakyReLUAttr {
 };
 
 class LeakyReLU_Op : public OperatorTensor,
-    public Registrable<LeakyReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
+    public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
     public StaticAttributes<LeakyReLUAttr, float> {
 public:
     static const std::string Type;
@@ -69,6 +69,6 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
+        SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index a011c8666bba55eb7254a8efcd432a3f680cd461..596aa634693941d8e3a23ac955281cfd131e56ef 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -27,7 +27,7 @@ namespace Aidge {
 class MatMul_Op : public OperatorTensor,
               public Registrable<MatMul_Op,
                                  std::string,
-                                 std::unique_ptr<OperatorImpl>(const MatMul_Op &)> {
+                                 std::shared_ptr<OperatorImpl>(const MatMul_Op &)> {
 public:
     static const std::string Type;
 
@@ -65,7 +65,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final {
-        mImpl = Registrar<MatMul_Op>::create(name)(*this);
+        SET_IMPL_MACRO(MatMul_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 467a69d73c98a21c85e956acf42536e197833cbd..6c4065f3f22f9d5b52237facf3fc4f53e9062bee 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -30,7 +30,7 @@ enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public OperatorTensor,
-                public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
+                public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
                 public StaticAttributes<MaxPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
@@ -105,7 +105,7 @@ public:
 
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 8758021a9c3de1707a96bbfafc21686ded8b7e40..75304078829475b1488640dc39aeee8b64f1c3e5 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -29,7 +29,7 @@ namespace Aidge {
  * @brief Tensor element-wise multiplication.
  */
 class Mul_Op : public OperatorTensor,
-    public Registrable<Mul_Op, std::string, std::unique_ptr<OperatorImpl>(const Mul_Op&)> {
+    public Registrable<Mul_Op, std::string, std::shared_ptr<OperatorImpl>(const Mul_Op&)> {
 public:
     static const std::string Type;
 
@@ -57,7 +57,7 @@ public:
     void computeOutputDims() override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Mul_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Mul_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -74,4 +74,4 @@ inline std::shared_ptr<Node> Mul(const std::string& name = "") {
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 56245dd2dfd62d4dc765de6e3d43b08c144cc62b..716164ef9f026558bc8eebeb26f6c14b9c858296 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -31,7 +31,7 @@ enum class PadBorderType { Constant, Edge, Reflect, Wrap };
 
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
-                public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
+                public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
                 public StaticAttributes<PadAttr,
                                        std::array<DimSize_t, 2*DIM>,
                                        PadBorderType,
@@ -98,7 +98,7 @@ public:
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(Pad_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index ba8d3d05877f9aa543518fff1d88f4e8a436b712..e8894d1a2418402e4087bbfda07b98ad0cb1d1fc 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -27,7 +27,7 @@
 namespace Aidge {
 
 class Pow_Op : public OperatorTensor,
-    public Registrable<Pow_Op, std::string, std::unique_ptr<OperatorImpl>(const Pow_Op&)> {
+    public Registrable<Pow_Op, std::string, std::shared_ptr<OperatorImpl>(const Pow_Op&)> {
 public:
     static const std::string Type;
 
@@ -55,7 +55,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Pow_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Pow_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -72,4 +72,4 @@ inline std::shared_ptr<Node> Pow(const std::string& name = "") {
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 0bb7cdffe421b973ae7c86b4569e7464b3cf6da4..7a5144f48e38777801fcf1082c05c488e734f492 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 
 class ReLU_Op : public OperatorTensor,
-    public Registrable<ReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const ReLU_Op&)> {
+    public Registrable<ReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const ReLU_Op&)> {
 public:
     static const std::string Type;
 
@@ -52,7 +52,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<ReLU_Op>::create(name)(*this);
+        SET_IMPL_MACRO(ReLU_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -69,4 +69,4 @@ inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
 }
 }
 
-#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 5f07cddfa667e7e494defe38a5667332744c3e20..70fe036351f38e44b10b66564a9bfe24aa4b89d1 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -32,7 +32,7 @@ enum class ReduceMeanAttr { Axes, KeepDims };
 
 template <DimIdx_t DIM>
 class ReduceMean_Op : public OperatorTensor,
-                public Registrable<ReduceMean_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
+                public Registrable<ReduceMean_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
                 public StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t> {
 
    public:
@@ -99,7 +99,7 @@ class ReduceMean_Op : public OperatorTensor,
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<ReduceMean_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 32d71d5adc3cfd92c9840dcb5bc61bfb6399c6db..410f55e5bbad43e294ae942d68fc2543449373b1 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
 enum class ReshapeAttr { Shape };
 
 class Reshape_Op : public OperatorTensor,
-                   public Registrable<Reshape_Op, std::string, std::unique_ptr<OperatorImpl>(const Reshape_Op&)>,
+                   public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)>,
                    public StaticAttributes<ReshapeAttr, std::vector<std::int64_t>> {
 
 public:
@@ -67,7 +67,7 @@ public:
     void computeOutputDims() override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Reshape_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Reshape_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 4a073bc525640846c28d718d09741a67d499830e..3635eb32c3e26195d4cf9979f3213f0bba487134 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -28,7 +28,7 @@ enum class SliceAttr { Starts, Ends, Axes };
 
 class Slice_Op
     : public OperatorTensor,
-      public Registrable<Slice_Op, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op &)>,
+      public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)>,
       public StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int64_t>> {
 public:
     static const std::string Type;
@@ -69,7 +69,7 @@ public:
     void computeOutputDims() override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Slice_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Slice_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index ed6689dc97ef17276df260cd90649f2a75b10007..a63827a858233b7f73a186efb0c4ee1bf5a340cf 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -33,7 +33,7 @@ enum class SoftmaxAttr { AxisIdx };
 class Softmax_Op : public OperatorTensor,
                 public Registrable<Softmax_Op,
                                    std::string,
-                                   std::unique_ptr<OperatorImpl>(const Softmax_Op&)>,
+                                   std::shared_ptr<OperatorImpl>(const Softmax_Op&)>,
                 public StaticAttributes<SoftmaxAttr, int> {
 
 public:
@@ -67,7 +67,7 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Softmax_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Softmax_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index 32adfdb93db1e9da857f4147efdcfe64bbb34475..69a1ffba967067cd998e797a75a9ab3958a51988 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -27,7 +27,7 @@
 namespace Aidge {
 
 class Sqrt_Op : public OperatorTensor,
-    public Registrable<Sqrt_Op, std::string, std::unique_ptr<OperatorImpl>(const Sqrt_Op&)> {
+    public Registrable<Sqrt_Op, std::string, std::shared_ptr<OperatorImpl>(const Sqrt_Op&)> {
 public:
     // FIXME: change accessibility
     std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -57,7 +57,7 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Sqrt_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Sqrt_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index 7d346457ead71724ba05da70b5bdf7ad145cbe0c..721b68a44f813cd72fa3d62dc18b545a31bfc4a6 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -27,7 +27,7 @@
 namespace Aidge {
 
 class Sub_Op : public OperatorTensor,
-    public Registrable<Sub_Op, std::string, std::unique_ptr<OperatorImpl>(const Sub_Op&)> {
+    public Registrable<Sub_Op, std::string, std::shared_ptr<OperatorImpl>(const Sub_Op&)> {
 public:
     // FIXME: change accessibility
     std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
@@ -60,7 +60,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Sub_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Sub_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -77,4 +77,4 @@ inline std::shared_ptr<Node> Sub(const std::string& name = "") {
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 2262bec14bd2f00cda643ade0709f7f9d509fa22..f081f830aa0d98ab6aa60f0632939010367b790d 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -30,7 +30,7 @@ enum class TransposeAttr { OutputDimsOrder };
 
 template <DimIdx_t DIM>
 class Transpose_Op : public OperatorTensor,
-                public Registrable<Transpose_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
+                public Registrable<Transpose_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
                 public StaticAttributes<TransposeAttr,
                                        std::array<DimSize_t, DIM>> {
 
@@ -80,7 +80,7 @@ class Transpose_Op : public OperatorTensor,
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Transpose_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(Transpose_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 74ec11c28e746856fe767f16a4380651271d8fe4..661c96bb835fa3ac719ab10dbf83e4137f1bb248 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -23,7 +23,7 @@ void declare_Add(py::module &m) {
   py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
   .def("get_inputs_name", &Add_Op::getInputsName)
   .def("get_outputs_name", &Add_Op::getOutputsName);
-
+  declare_registrable<Add_Op>(m, "AddOp");
   m.def("Add", &Add, py::arg("nbIn"), py::arg("name") = "");
 }
 
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index dc586b7d947c6d8433fabe2fbfaa0990de5c132a..b11d2c5ec438b5d1405fa5f576eba44c58aa77ef 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -26,8 +26,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
+  const std::string pyClassName("AvgPoolingOp" + std::to_string(DIM) + "D");
   py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>(
-    m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
+    m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &>(),
@@ -36,7 +37,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
   .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
   .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
   .def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName);
-
+  declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
   m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims) {
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index c81c7ade4de50e6879fd32c59f6574b14c473398..7020c35f63880e77ecd3c2011a1b3c74bed847ed 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -21,10 +21,12 @@ namespace Aidge {
 
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
+    const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D");
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, pyClassName.c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
     .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
     .def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
+    declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
 
     m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nbFeatures"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 8cdd138b8cde2a582e9f569a17ae33811637092c..38d8a20cba1eafc255b1da313d35ad8be116620d 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -24,6 +24,7 @@ void init_Concat(py::module& m) {
     .def("get_outputs_name", &Concat_Op::getOutputsName)
     .def("attributes_name", &Concat_Op::staticGetAttrsName);
 
+    declare_registrable<Concat_Op>(m, "ConcatOp");
     m.def("Concat", &Concat, py::arg("nbIn"), py::arg("axis"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index d858336b6578b580378778f64984ba565e28f941..7ac26624cb31dba42d72a000bda8984579f0a5a9 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -26,8 +26,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
+  const std::string pyClassName("ConvDepthWiseOp" + std::to_string(DIM) + "D");
   py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
-    m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
+    m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const DimSize_t,
                 const std::array<DimSize_t, DIM> &,
@@ -40,7 +41,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
   .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
   .def("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName);
-
+  declare_registrable<ConvDepthWise_Op<DIM>>(m, pyClassName);
   m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
                                                                   const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
index 6d14510f34349c001289096a7fc9b08681a25bc8..2996e0bcae6d69d9ad2ef0d4d8eee8489cd8cdc8 100644
--- a/python_binding/operator/pybind_Div.cpp
+++ b/python_binding/operator/pybind_Div.cpp
@@ -21,7 +21,7 @@ void init_Div(py::module& m) {
     py::class_<Div_Op, std::shared_ptr<Div_Op>, OperatorTensor>(m, "DivOp", py::multiple_inheritance())
     .def("get_inputs_name", &Div_Op::getInputsName)
     .def("get_outputs_name", &Div_Op::getOutputsName);
-
+    declare_registrable<Div_Op>(m, "DivOp");
     m.def("Div", &Div, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp
index 806867f61c3580543c184d529edc2856ee8d7a6c..e1aef08ad597d92c4cf4b6d5a2cff487e438538e 100644
--- a/python_binding/operator/pybind_Erf.cpp
+++ b/python_binding/operator/pybind_Erf.cpp
@@ -21,7 +21,7 @@ void init_Erf(py::module& m) {
     py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(m, "ErfOp", py::multiple_inheritance())
     .def("get_inputs_name", &Erf_Op::getInputsName)
     .def("get_outputs_name", &Erf_Op::getOutputsName);
-
+    declare_registrable<Erf_Op>(m, "ErfOp");
     m.def("Erf", &Erf, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index ad589d73d0aea94d96e62e8065b70bd517633f88..0b13643cbd3ebb265dab62a1030729fca62dea62 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -24,7 +24,7 @@ void declare_FC(py::module &m) {
   .def("get_inputs_name", &FC_Op::getInputsName)
   .def("get_outputs_name", &FC_Op::getOutputsName)
   .def("attributes_name", &FC_Op::staticGetAttrsName);
-
+  declare_registrable<FC_Op>(m, "FCOp");
   m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
 
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index f0d55e2f40bd89269c96564cea6b5a002b477b8b..a67dd6c1320bd9bcbc8c96af179319f9f5184ecc 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -23,7 +23,7 @@ void init_Gather(py::module& m) {
     .def("get_inputs_name", &Gather_Op::getInputsName)
     .def("get_outputs_name", &Gather_Op::getOutputsName)
     .def("attributes_name", &Gather_Op::staticGetAttrsName);
-
+    declare_registrable<Gather_Op>(m, "GatherOp");
     m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 3e9acb831eb3334bd126d3b360f3b5aa39d83731..66b2c34a9a558d20d90f71dd590d9fe8c370c10d 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -22,7 +22,7 @@ void init_LeakyReLU(py::module& m) {
     .def("get_inputs_name", &LeakyReLU_Op::getInputsName)
     .def("get_outputs_name", &LeakyReLU_Op::getOutputsName)
     .def("attributes_name", &LeakyReLU_Op::staticGetAttrsName);
-
+    declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index d0d7f28d52a9a9899b08d37a0c1a4a8720f2ae20..383bad54be08905c5e9248ab3f7bf5c83bddc836 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -23,7 +23,7 @@ void init_MatMul(py::module &m) {
   py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
   .def("get_inputs_name", &MatMul_Op::getInputsName)
   .def("get_outputs_name", &MatMul_Op::getOutputsName);
-
+  declare_registrable<MatMul_Op>(m, "MatMulOp");
   m.def("MatMul", &MatMul, py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 485e0eaf6e6e68367ae9037fd922da07433a76e3..689108116f0e4c62948a097e1c209d29d4c8ce1f 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -26,6 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
+  const std::string pyClassName("MaxPoolingOp" + std::to_string(DIM) + "D");
   py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
@@ -38,7 +39,7 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
   .def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
   .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
   .def("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName);
-
+  declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName);
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
index 21f510d98728fbe5401288a366294241b5f10a3f..5354f01ca508eb6ff04304d1f4072f431339973c 100644
--- a/python_binding/operator/pybind_Mul.cpp
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -21,7 +21,7 @@ void init_Mul(py::module& m) {
     py::class_<Mul_Op, std::shared_ptr<Mul_Op>, OperatorTensor>(m, "MulOp", py::multiple_inheritance())
     .def("get_inputs_name", &Mul_Op::getInputsName)
     .def("get_outputs_name", &Mul_Op::getOutputsName);
-
+    declare_registrable<Mul_Op>(m, "MulOp");
     m.def("Mul", &Mul, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index df3fdc297ce44cf96ff26bffb4cd96fa1fe8fe22..62d4547b0c9d98477aff7c4712c72478688b4382 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -25,8 +25,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
+  const std::string pyClassName("PadOp" + std::to_string(DIM) + "D");
   py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>(
-    m, ("PadOp" + std::to_string(DIM) + "D").c_str(),
+    m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, 2*DIM> &,
                 const PadBorderType &,
@@ -38,7 +39,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
     .def("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
     .def("attributes_name", &Pad_Op<DIM>::staticGetAttrsName)
     ;
-
+  declare_registrable<Pad_Op<DIM>>(m, pyClassName);
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
                                                         const std::string& name,
                                                         const PadBorderType &borderType = PadBorderType::Constant,
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
index 09d1e4ad2ad6413901c28bc9d9fe16995483da05..03e822adbd326b6ad9693d58b53cd9f8f4bc3ac8 100644
--- a/python_binding/operator/pybind_Pow.cpp
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -21,6 +21,7 @@ void init_Pow(py::module& m) {
     py::class_<Pow_Op, std::shared_ptr<Pow_Op>, OperatorTensor>(m, "PowOp", py::multiple_inheritance())
     .def("get_inputs_name", &Pow_Op::getInputsName)
     .def("get_outputs_name", &Pow_Op::getOutputsName);
+    declare_registrable<Pow_Op>(m, "PowOp");
 
     m.def("Pow", &Pow, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index 24ae96649a87ff9acc996715d3cd00a97c393578..f08c67cb98b629b8d1b61471c6f50a0de4c421d6 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -21,6 +21,7 @@ void init_ReLU(py::module& m) {
     py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, OperatorTensor>(m, "ReLUOp", py::multiple_inheritance())
     .def("get_inputs_name", &ReLU_Op::getInputsName)
     .def("get_outputs_name", &ReLU_Op::getOutputsName);
+    declare_registrable<ReLU_Op>(m, "ReLUOp");
 
     m.def("ReLU", &ReLU, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 1a50edba03f62e6c43ff60320fe4c3d5caa65f41..cac1269912e2194d0334a7572d55d1223e249fe9 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -24,12 +24,14 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) {
+  const std::string pyClassName("ReduceMeanOp" + std::to_string(DIM) + "D");
   py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, Attributes, OperatorTensor>(
-    m, ("ReduceMeanOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
+    m, pyClassName.c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName)
     .def("get_outputs_name", &ReduceMean_Op<DIM>::getOutputsName)
     .def("attributes_name", &ReduceMean_Op<DIM>::staticGetAttrsName)
     ;
+  declare_registrable<ReduceMean_Op<DIM>>(m, pyClassName);
 
   m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), [](const std::vector<int>& axes,
                                                                 DimSize_t keepDims,
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index d34a411c719bdbb1144edaa65b50050d705e0d90..e79f969debc27c46aa3f076cc7b275fa6e760ea0 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -21,7 +21,7 @@ void init_Reshape(py::module& m) {
     py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
     .def("get_inputs_name", &Reshape_Op::getInputsName)
     .def("get_outputs_name", &Reshape_Op::getOutputsName);
-
+    declare_registrable<Reshape_Op>(m, "ReshapeOp");
     m.def("Reshape", &Reshape, py::arg("shape"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index 7bfd1b4f00579ed29658db73b71f2c596048fe75..3bb1b082c19b98447726b0fb980cbd8688fd5ba3 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -21,7 +21,7 @@ void init_Slice(py::module& m) {
     py::class_<Slice_Op, std::shared_ptr<Slice_Op>, OperatorTensor>(m, "SliceOp", py::multiple_inheritance())
     .def("get_inputs_name", &Slice_Op::getInputsName)
     .def("get_outputs_name", &Slice_Op::getOutputsName);
-
+    declare_registrable<Slice_Op>(m, "SliceOp");
     m.def("Slice", &Slice, py::arg("starts"), py::arg("ends"), py::arg("axes"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 780cffdef695b71dbc2781ba30936b3b45657cbb..bac553387a00856f2d4e01dea95e630a59666938 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -23,7 +23,7 @@ void init_Softmax(py::module& m) {
     .def("get_inputs_name", &Softmax_Op::getInputsName)
     .def("get_outputs_name", &Softmax_Op::getOutputsName)
     .def("attributes_name", &Softmax_Op::staticGetAttrsName);
-
+    declare_registrable<Softmax_Op>(m, "SoftmaxOp");
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index 98d65242e8ff199992bbfc740192ae25e6d7b738..33d46e02caee1046cbbdbaaa186c4898db5b10c1 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -21,7 +21,7 @@ void init_Sqrt(py::module& m) {
     py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, OperatorTensor>(m, "SqrtOp", py::multiple_inheritance())
     .def("get_inputs_name", &Sqrt_Op::getInputsName)
     .def("get_outputs_name", &Sqrt_Op::getOutputsName);
-
+    declare_registrable<Sqrt_Op>(m, "SqrtOp");
     m.def("Sqrt", &Sqrt, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
index dce1ab6cb27cc7da02e6c817a6bc49ec64bcf364..1b858d1527eb3969e2acad9c0206311ff2981f17 100644
--- a/python_binding/operator/pybind_Sub.cpp
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -21,7 +21,7 @@ void init_Sub(py::module& m) {
     py::class_<Sub_Op, std::shared_ptr<Sub_Op>, OperatorTensor>(m, "SubOp", py::multiple_inheritance())
     .def("get_inputs_name", &Sub_Op::getInputsName)
     .def("get_outputs_name", &Sub_Op::getOutputsName);
-
+    declare_registrable<Sub_Op>(m, "SubOp");
     m.def("Sub", &Sub, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index d535a2c932c8d61c0395f03ffc0978caf7ad692f..109177fb8ca60760d82a0ace09e17bd8d164942e 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -27,12 +27,15 @@ namespace Aidge {
 
 template <DimIdx_t DIM>
 void declare_Transpose(py::module &m) {
+  const std::string pyClassName("TransposeOp" + std::to_string(DIM) + "D");
   py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
   .def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
   .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName)
   .def("attributes_name", &Transpose_Op<DIM>::staticGetAttrsName);
 
+  declare_registrable<Transpose_Op<DIM>>(m, pyClassName);
+
   m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order,
                                                                   const std::string& name) {
         AIDGE_ASSERT(output_dims_order.size() == DIM, "output_dims_order size [%ld] does not match DIM [%d]", output_dims_order.size(), DIM);