diff --git a/aidge_core/unit_tests/test_impl.py b/aidge_core/unit_tests/test_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad7ee666ebb56941cdc426220cd117a0e3f8b8d1
--- /dev/null
+++ b/aidge_core/unit_tests/test_impl.py
@@ -0,0 +1,72 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+
+GLOBAL_CPT = 0
+
+class testImpl(aidge_core.OperatorImpl):
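+    """Minimal Python implementation: forward() just increments a global counter."""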
+    def __init__(self, op: aidge_core.Operator):
+        aidge_core.OperatorImpl.__init__(self, op) # Required to avoid a TypeError: the C++ base class must be initialized explicitly
+
+    def forward(self):
+        global GLOBAL_CPT
+        GLOBAL_CPT += 1
+
+class test_OperatorImpl(unittest.TestCase):
+    """Test Op
+    """
+    def setUp(self):
+        global GLOBAL_CPT
+        GLOBAL_CPT = 0
+
+    def tearDown(self):
+        pass
+
+    def test_setImplementation(self):
+        """Test setting an implementation manually
+        """
+        global GLOBAL_CPT
+        matmul = aidge_core.GenericOperator("MatMul", 1, 0, 1, name="MatMul0")
+        generic_matmul_op = matmul.get_operator()
+        generic_matmul_op.set_compute_output_dims(lambda x: x)
+        generic_matmul_op.set_impl(testImpl(generic_matmul_op))
+        generic_matmul_op.forward()
+        self.assertEqual(GLOBAL_CPT, 1)
+
+    def test_Registrar_setOp(self):
+        """Test registering an implementation
+        """
+        global GLOBAL_CPT
+        aidge_core.register_ConvOp2D("cpu", testImpl)
+        self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
+        conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
+        conv.get_operator().set_backend("cpu")
+        conv.get_operator().forward()
+        self.assertEqual(GLOBAL_CPT, 1)
+
+    def test_Registrar_setGraphView(self):
+        """Test registering an implementation
+        """
+        global GLOBAL_CPT
+        aidge_core.register_ConvOp2D("cpu", testImpl)
+        aidge_core.register_ProducerOp("cpu", testImpl)
+        self.assertTrue("cpu" in aidge_core.get_keys_ConvOp2D())
+        conv = aidge_core.Conv2D(2,2,[1,1], name="Conv0")
+        model = aidge_core.sequential([conv])
+        model.set_backend("cpu")
+        conv.get_operator().forward()
+        self.assertEqual(GLOBAL_CPT, 1)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 3c27e118a384debdaf5505aec4ab993f260a97de..9c516575690fbca947496920c7068874bda6bf63 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -1,77 +1,75 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#ifndef AIDGE_IMPORTS_H_
-#define AIDGE_IMPORTS_H_
-
-#include "aidge/backend/OperatorImpl.hpp"
-#include "aidge/backend/TensorImpl.hpp"
-#include "aidge/backend/StimulusImpl.hpp"
-
-#include "aidge/backend/cpu/data/TensorImpl.hpp"
-#include "aidge/backend/cpu/data/GetCPUPtr.h"
-
-#include "aidge/data/Data.hpp"
-#include "aidge/data/Tensor.hpp"
-#include "aidge/data/Database.hpp"
-#include "aidge/data/DataProvider.hpp"
-#include "aidge/graph/Connector.hpp"
-#include "aidge/graph/GraphView.hpp"
-#include "aidge/graph/Node.hpp"
-#include "aidge/graph/OpArgs.hpp"
-#include "aidge/graphmatching/Match.hpp"
-#include "aidge/graphmatching/NodeRegex.hpp"
-#include "aidge/graphmatching/SeqStm.hpp"
-#include "aidge/graphmatching/StmFactory.hpp"
-#include "aidge/graphmatching/Utile.hpp"
-
-#include "aidge/operator/Add.hpp"
-#include "aidge/operator/AvgPooling.hpp"
-#include "aidge/operator/BatchNorm.hpp"
-#include "aidge/operator/Concat.hpp"
-#include "aidge/operator/Conv.hpp"
-#include "aidge/operator/ConvDepthWise.hpp"
-#include "aidge/operator/Div.hpp"
-#include "aidge/operator/Erf.hpp"
-#include "aidge/operator/FC.hpp"
-#include "aidge/operator/Gather.hpp"
-#include "aidge/operator/GenericOperator.hpp"
-#include "aidge/operator/MatMul.hpp"
-#include "aidge/operator/MaxPooling.hpp"
-#include "aidge/operator/MetaOperator.hpp"
-#include "aidge/operator/MetaOperatorDefs.hpp"
-#include "aidge/operator/Mul.hpp"
-#include "aidge/operator/Operator.hpp"
-#include "aidge/operator/Pad.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/operator/Pow.hpp"
-#include "aidge/operator/ReduceMean.hpp"
-#include "aidge/operator/ReLU.hpp"
-#include "aidge/operator/Reshape.hpp"
-#include "aidge/operator/Scaling.hpp"
-#include "aidge/operator/Slice.hpp"
-#include "aidge/operator/Softmax.hpp"
-#include "aidge/operator/Sqrt.hpp"
-#include "aidge/operator/Sub.hpp"
-#include "aidge/operator/Transpose.hpp"
-#include "aidge/scheduler/Scheduler.hpp"
-#include "aidge/stimuli/Stimulus.hpp"
-
-#include "aidge/recipes/Recipes.hpp"
-
-#include "aidge/utils/Attributes.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
-#include "aidge/utils/DynamicAttributes.hpp"
-#include "aidge/utils/Random.hpp"
-#include "aidge/utils/Registrar.hpp"
-#include "aidge/utils/Types.h"
-
-#endif /* AIDGE_IMPORTS_H_ */
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_IMPORTS_H_
+#define AIDGE_IMPORTS_H_
+
+#include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/backend/TensorImpl.hpp"
+#include "aidge/backend/StimulusImpl.hpp"
+
+#include "aidge/backend/cpu/data/TensorImpl.hpp"
+#include "aidge/backend/cpu/data/GetCPUPtr.h"
+
+#include "aidge/data/Data.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/data/Database.hpp"
+#include "aidge/data/DataProvider.hpp"
+#include "aidge/graph/Connector.hpp"
+#include "aidge/graph/GraphView.hpp"
+#include "aidge/graph/Node.hpp"
+#include "aidge/graph/OpArgs.hpp"
+
+#include "aidge/graphRegex/GraphRegex.hpp"
+#include "aidge/nodeTester/ConditionalInterpreter.hpp"
+
+#include "aidge/operator/Add.hpp"
+#include "aidge/operator/AvgPooling.hpp"
+#include "aidge/operator/BatchNorm.hpp"
+#include "aidge/operator/Concat.hpp"
+#include "aidge/operator/Conv.hpp"
+#include "aidge/operator/ConvDepthWise.hpp"
+#include "aidge/operator/Div.hpp"
+#include "aidge/operator/Erf.hpp"
+#include "aidge/operator/FC.hpp"
+#include "aidge/operator/Gather.hpp"
+#include "aidge/operator/GenericOperator.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/MaxPooling.hpp"
+#include "aidge/operator/MetaOperator.hpp"
+#include "aidge/operator/MetaOperatorDefs.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/Operator.hpp"
+#include "aidge/operator/Pad.hpp"
+#include "aidge/operator/Producer.hpp"
+#include "aidge/operator/Pow.hpp"
+#include "aidge/operator/ReduceMean.hpp"
+#include "aidge/operator/ReLU.hpp"
+#include "aidge/operator/Reshape.hpp"
+#include "aidge/operator/Scaling.hpp"
+#include "aidge/operator/Slice.hpp"
+#include "aidge/operator/Softmax.hpp"
+#include "aidge/operator/Sqrt.hpp"
+#include "aidge/operator/Sub.hpp"
+#include "aidge/operator/Transpose.hpp"
+#include "aidge/scheduler/Scheduler.hpp"
+#include "aidge/stimuli/Stimulus.hpp"
+
+#include "aidge/recipes/Recipes.hpp"
+
+#include "aidge/utils/Attributes.hpp"
+#include "aidge/utils/StaticAttributes.hpp"
+#include "aidge/utils/DynamicAttributes.hpp"
+#include "aidge/utils/Random.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Types.h"
+
+#endif /* AIDGE_IMPORTS_H_ */
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 3311797d858cf4899a6cfed7a18fb9840afb514e..46fa56ef0e7d63ce10bb3c96a8d7e1c42b191322 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -62,10 +62,10 @@ public:
         return mNodes == gv.mNodes;
     }
 
-    NodePtr operator[](const std::string& name)
+    NodePtr operator[](const std::string& nodeName)
     {
-        assert(mNodeRegistry.find(name) != mNodeRegistry.end() && "Could not find Node in the GraphView.");
-        return mNodeRegistry.at(name);
+        AIDGE_ASSERT(mNodeRegistry.find(nodeName) != mNodeRegistry.end(), "No node named {} in graph {}.", nodeName, name());
+        return mNodeRegistry.at(nodeName);
     }
 
 ///////////////////////////////////////////////////////
@@ -379,11 +379,10 @@ public:
      * @param toTensor Input Tensor ID of the new Node. Default to gk_IODefaultIndex, meaning
      * first available data input for the Node.
      */
-    inline void addChild(NodePtr toOtherNode, std::string fromOutNodeName,
+    inline void addChild(NodePtr toOtherNode, const std::string& fromOutNodeName,
                          const IOIndex_t fromTensor = IOIndex_t(0),
                          IOIndex_t toTensor = gk_IODefaultIndex) {
-        assert(mNodeRegistry.find(fromOutNodeName) != mNodeRegistry.end() &&
-               "No Node with this name found in the GraphView.");
+        AIDGE_ASSERT(mNodeRegistry.find(fromOutNodeName) != mNodeRegistry.end(), "No node named {} in graph {}.", fromOutNodeName, name());
         addChild(toOtherNode, mNodeRegistry.at(fromOutNodeName), fromTensor, toTensor);
     }
 
@@ -524,7 +523,6 @@ private:
     //        TOPOLOGY
     ///////////////////////////////////////////////////////
 
-    void _forwardDims(std::set<NodePtr> listNodes);
 };
 
 /**
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 97a4ef69bd371e80c4e63303feac5e64197670b3..3115cedca1f2a3bcc4a1330b96e90669bf7611a2 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -28,7 +28,7 @@
 namespace Aidge {
 
 class Add_Op : public OperatorTensor,
-    public Registrable<Add_Op, std::string, std::unique_ptr<OperatorImpl>(const Add_Op&)> {
+    public Registrable<Add_Op, std::string, std::shared_ptr<OperatorImpl>(const Add_Op&)> {
 public:
     static const std::string Type;
 
@@ -47,7 +47,11 @@ public:
     Add_Op(const Add_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Add_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Add_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -71,7 +75,7 @@ public:
     void computeOutputDims() override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Add_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Add_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 5066cb78f86bfc87d33fce4ecd8f302c40cb14d2..e427aac72ad3948d0d03f588c930cfccedfb1885 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -30,7 +30,7 @@ enum class AvgPoolingAttr { StrideDims, KernelDims };
 
 template <DimIdx_t DIM>
 class AvgPooling_Op : public OperatorTensor,
-                public Registrable<AvgPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
+                public Registrable<AvgPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>,
                 public StaticAttributes<AvgPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>> {
@@ -60,7 +60,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<AvgPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -137,7 +141,7 @@ public:
 
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<AvgPooling_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -177,4 +181,4 @@ const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {"StrideDims",
                                                           "KernelDims"};
 }
 
-#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 4a0f40c034c7738a33eb8a9569fac4aa2fff465d..83ad2dbbb695e42c11cb794c7d5bd4578056d941 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -30,7 +30,7 @@ enum class BatchNormAttr { Epsilon, Momentum };
 
 template <DimIdx_t DIM>
 class BatchNorm_Op : public OperatorTensor,
-                public Registrable<BatchNorm_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
+                public Registrable<BatchNorm_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>,
                 public StaticAttributes<BatchNormAttr, float, float> {
 public:
     static const std::string Type;
@@ -54,7 +54,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<BatchNorm_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -95,7 +99,7 @@ public:
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<BatchNorm_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
 
         // By default, automatically set backend for scale, shift, mean and variance
@@ -136,4 +140,4 @@ template <>
 const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
 }
 
-#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
\ No newline at end of file
+#endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 62a9540105d77866167d87b9733ed473e03f0151..450c40bd210e0a4be891e436f03330a984e221be 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -29,7 +29,7 @@ namespace Aidge {
 enum class ConcatAttr { Axis };
 
 class Concat_Op : public OperatorTensor,
-    public Registrable<Concat_Op, std::string, std::unique_ptr<OperatorImpl>(const Concat_Op&)>,
+    public Registrable<Concat_Op, std::string, std::shared_ptr<OperatorImpl>(const Concat_Op&)>,
     public StaticAttributes<ConcatAttr, DimSize_t> {
 public:
     static const std::string Type;
@@ -55,7 +55,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Concat_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Concat_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -108,7 +112,7 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Concat_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Concat_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 8290fb3d0d978e9af3291809c5057406424096d5..82cd5df8e24457bd9f5e07c89826904c7d2283ad 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -23,7 +23,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
-#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/Registrar.hpp" // SET_IMPL_MACRO
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
@@ -31,7 +31,7 @@ enum class ConvAttr { StrideDims, DilationDims, InChannels, OutChannels, KernelD
 
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
-                public Registrable<Conv_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
+                public Registrable<Conv_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>,
                 public StaticAttributes<ConvAttr, std::array<DimSize_t, DIM>, std::array<DimSize_t, DIM>, DimSize_t,
                                        DimSize_t, std::array<DimSize_t, DIM>> {
 
@@ -65,7 +65,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Conv_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -174,7 +178,7 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> co
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Conv_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(Conv_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
 
         // By default, automatically set backend for weight and bias inputs
@@ -245,4 +249,4 @@ const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
 };
 }
 
-#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_CONV_H_ */
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index a3b537ba60d03209e078dc94348f001603d2f3f5..7fa9124d4c750cee53d9c4a402a2fa6196ac8158 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -30,7 +30,7 @@ enum class ConvDepthWiseAttr { StrideDims, DilationDims, Channels, KernelDims };
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
-                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
+                public Registrable<ConvDepthWise_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>,
                 public StaticAttributes<ConvDepthWiseAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
@@ -67,7 +67,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<ConvDepthWise_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -168,7 +172,7 @@ public:
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<ConvDepthWise_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
 
         // By default, automatically set backend for weight and bias inputs
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index a033c6920a374003ad869bddbf5641c48fc5f6e2..be654a3c015e5810892c1e23f08cc1f4b83b2d93 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 
 class Div_Op : public OperatorTensor,
-    public Registrable<Div_Op, std::string, std::unique_ptr<OperatorImpl>(const Div_Op&)> {
+    public Registrable<Div_Op, std::string, std::shared_ptr<OperatorImpl>(const Div_Op&)> {
 
 public:
     static const std::string Type;
@@ -40,7 +40,11 @@ public:
     Div_Op(const Div_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Div_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Div_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -55,7 +59,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Div_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Div_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index 6995cea5e4af9a17cf3d24516d9840850e701669..5a92b5dc45b6a090be0d9306dbfc21b1c0ae6edb 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -27,7 +27,7 @@
 namespace Aidge {
 
 class Erf_Op : public OperatorTensor,
-    public Registrable<Erf_Op, std::string, std::unique_ptr<OperatorImpl>(const Erf_Op&)> {
+    public Registrable<Erf_Op, std::string, std::shared_ptr<OperatorImpl>(const Erf_Op&)> {
 public:
     static const std::string Type;
 
@@ -40,7 +40,11 @@ public:
     Erf_Op(const Erf_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Erf_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Erf_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -52,7 +56,7 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Erf_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Erf_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index f6d81b5781dd25c990f496fa9f592502c9705eba..c111e38b00e69c8d0aecd9df0023f07a47a3865d 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -32,7 +32,7 @@ enum class FCAttr { OutChannels, NoBias };
 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
-                                 std::unique_ptr<OperatorImpl>(const FC_Op &)>,
+                                 std::shared_ptr<OperatorImpl>(const FC_Op &)>,
               public StaticAttributes<FCAttr, DimSize_t, bool> {
 public:
     static const std::string Type;
@@ -57,7 +57,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<FC_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(FC_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -97,7 +101,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<FC_Op>::create(name)(*this);
+        SET_IMPL_MACRO(FC_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
 
         // By default, automatically set backend for weight and bias inputs
@@ -128,4 +132,4 @@ const char *const EnumStrings<Aidge::FCAttr>::data[] = {"OutChannels",
                                                         "NoBias"};
 }
 
-#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index f6647f99151304d0cf083aed109cc642c9f1ecc2..142f6582a3afbc85ccd951fcfeff2a924a35e718 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -32,7 +32,7 @@ enum class GatherAttr { Indices, GatheredShape, Axis };
 class Gather_Op : public OperatorTensor,
                 public Registrable<Gather_Op,
                                    std::string,
-                                   std::unique_ptr<OperatorImpl>(const Gather_Op&)>,
+                                   std::shared_ptr<OperatorImpl>(const Gather_Op&)>,
                 public StaticAttributes<GatherAttr, std::vector<std::int64_t>, std::vector<DimSize_t>, std::int64_t> {
 
 public:
@@ -58,7 +58,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Gather_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Gather_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -72,7 +76,7 @@ public:
     void computeOutputDims() override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Gather_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Gather_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index c315e671c2f084af869e3b21107066137496366b..20b0cdc4aa8a42043c37851ef110427a561e5e1d 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -110,8 +110,8 @@ public:
  * @brief Fictive custom operator not associated with any implementation.
  * Allows to import unknown operators and simulate new ones.
  * @param type Type of the fictive operator.
- * @param nbDataIn Number of input data.
- * @param nbIn Number input data + number of learnt parameters.
+ * @param nbData Number of input data.
+ * @param nbParam Number of parameters.
  * @param nbOut Number of output data.
  * @param name (optional) name of the Operator.
  * @return std::shared_ptr<Node> Node associated with the Generic Operator.
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 5976f1d88d70ae7fb716f4038e57da95242c3551..c48b85b4a7af71fde0f8136732597e098c966839 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -30,7 +30,7 @@ enum class LeakyReLUAttr {
 };
 
 class LeakyReLU_Op : public OperatorTensor,
-    public Registrable<LeakyReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
+    public Registrable<LeakyReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>,
     public StaticAttributes<LeakyReLUAttr, float> {
 public:
     static const std::string Type;
@@ -54,7 +54,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<LeakyReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(LeakyReLU_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -68,7 +72,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<LeakyReLU_Op>::create(name)(*this);
+        SET_IMPL_MACRO(LeakyReLU_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index a011c8666bba55eb7254a8efcd432a3f680cd461..596aa634693941d8e3a23ac955281cfd131e56ef 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -27,7 +27,7 @@ namespace Aidge {
 class MatMul_Op : public OperatorTensor,
               public Registrable<MatMul_Op,
                                  std::string,
-                                 std::unique_ptr<OperatorImpl>(const MatMul_Op &)> {
+                                 std::shared_ptr<OperatorImpl>(const MatMul_Op &)> {
 public:
     static const std::string Type;
 
@@ -65,7 +65,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final {
-        mImpl = Registrar<MatMul_Op>::create(name)(*this);
+        SET_IMPL_MACRO(MatMul_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index b07fa38a41c664c4fcbf90227914264ec68390a0..06ac30158f80a946b9310a93c8f81cc3ee975c84 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -30,7 +30,7 @@ enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public OperatorTensor,
-                public Registrable<MaxPooling_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
+                public Registrable<MaxPooling_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>,
                 public StaticAttributes<MaxPoolingAttr,
                                        std::array<DimSize_t, DIM>,
                                        std::array<DimSize_t, DIM>,
@@ -64,7 +64,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<MaxPooling_Op<DIM>>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -105,7 +109,7 @@ public:
 
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<MaxPooling_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index 62fb9897384673c695895b54557b4cf637aa2447..3652cf9697c6bcfea4befe4cdcdf5b9efff8b70c 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -72,4 +72,4 @@ inline std::shared_ptr<Node> Move(const std::string& name = "") {
 }
 }
 
-#endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 8758021a9c3de1707a96bbfafc21686ded8b7e40..75304078829475b1488640dc39aeee8b64f1c3e5 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -29,7 +29,7 @@ namespace Aidge {
  * @brief Tensor element-wise multiplication.
  */
 class Mul_Op : public OperatorTensor,
-    public Registrable<Mul_Op, std::string, std::unique_ptr<OperatorImpl>(const Mul_Op&)> {
+    public Registrable<Mul_Op, std::string, std::shared_ptr<OperatorImpl>(const Mul_Op&)> {
 public:
     static const std::string Type;
 
@@ -57,7 +57,7 @@ public:
     void computeOutputDims() override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Mul_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Mul_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -74,4 +74,4 @@ inline std::shared_ptr<Node> Mul(const std::string& name = "") {
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index a0d2292b7860baa60fe537698784d4d250c81f42..396c60e46127ee9312745a92f9112dbc0742a584 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -115,15 +115,21 @@ public:
     virtual void setDataType(const DataType& dataType) const = 0;
 
     /**
-     * @brief Set the a new OperatorImpl to the Operator
+     * @brief Set a new OperatorImpl to the Operator
      *
      */
     inline void setImpl(std::shared_ptr<OperatorImpl> impl) { mImpl = impl; }
 
     /**
-     * @brief Minimum amount of data from a specific input required by the
-     * implementation to be run.
+     * @brief Get the OperatorImpl of the Operator
      *
+     */
+    inline std::shared_ptr<OperatorImpl> getImpl() const noexcept {
+        return mImpl;
+    }
+
+    /**
+     * @brief Minimum amount of data from a specific input for one computation pass.
      * @param inputIdx Index of the input analysed.
      * @return NbElts_t
      */
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index bb961295bfaad2999af01460c49833085ff50a92..dce2a6e9e5ea9e0c5fe9a841c587c1f7bbe36fc7 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -31,7 +31,7 @@ enum class PadBorderType { Constant, Edge, Reflect, Wrap };
 
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
-                public Registrable<Pad_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
+                public Registrable<Pad_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>,
                 public StaticAttributes<PadAttr,
                                        std::array<DimSize_t, 2*DIM>,
                                        PadBorderType,
@@ -98,7 +98,7 @@ public:
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Pad_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(Pad_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index ba8d3d05877f9aa543518fff1d88f4e8a436b712..ec4eebf9ddba475310ba292dd5923ba50933545d 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -27,7 +27,7 @@
 namespace Aidge {
 
 class Pow_Op : public OperatorTensor,
-    public Registrable<Pow_Op, std::string, std::unique_ptr<OperatorImpl>(const Pow_Op&)> {
+    public Registrable<Pow_Op, std::string, std::shared_ptr<OperatorImpl>(const Pow_Op&)> {
 public:
     static const std::string Type;
 
@@ -40,7 +40,11 @@ public:
     Pow_Op(const Pow_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Pow_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Pow_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -55,7 +59,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Pow_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Pow_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -72,4 +76,4 @@ inline std::shared_ptr<Node> Pow(const std::string& name = "") {
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_POW_H_ */
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 0731498dd3e06541ed82a86a98c2ae0bb355f413..c9b1f6e4aa5d82006d4bed880151ac1a22a4882b 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -28,7 +28,7 @@ enum class ProdAttr { Constant };
 
 class Producer_Op
     : public OperatorTensor,
-      public Registrable<Producer_Op, std::string, std::unique_ptr<OperatorImpl>(
+      public Registrable<Producer_Op, std::string, std::shared_ptr<OperatorImpl>(
                                           const Producer_Op &)>,
       public StaticAttributes<ProdAttr, bool> {
 public:
@@ -67,9 +67,11 @@ public:
         for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
             mOutputs[i] = std::make_shared<Tensor>(*(op.getOutput(i)));
         }
-        mImpl = (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()}))
-            ? Registrar<Producer_Op>::create(mOutputs[0]->getImpl()->backend())(*this)
-            : std::make_shared<OperatorImpl>(*this);
+        if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})) {
+            SET_IMPL_MACRO(Producer_Op, *this, mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = std::make_shared<OperatorImpl>(*this);
+        }
     }
 
     /**
@@ -92,9 +94,7 @@ public:
     inline const std::vector<DimSize_t> dims() const noexcept { return mOutputs[0]->dims(); }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        if (Registrar<Producer_Op>::exists({name})) {
-            mImpl = Registrar<Producer_Op>::create({name})(*this);
-        }
+        SET_IMPL_MACRO(Producer_Op, *this, name);
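+        // In the non-Python build, SET_IMPL_MACRO guards with Registrar::exists(),
+        // preserving the previous optional-implementation behaviour.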
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 0bb7cdffe421b973ae7c86b4569e7464b3cf6da4..5b8f5c4b819f9a2f8cf518bdc50c445fbce38102 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -26,7 +26,7 @@
 namespace Aidge {
 
 class ReLU_Op : public OperatorTensor,
-    public Registrable<ReLU_Op, std::string, std::unique_ptr<OperatorImpl>(const ReLU_Op&)> {
+    public Registrable<ReLU_Op, std::string, std::shared_ptr<OperatorImpl>(const ReLU_Op&)> {
 public:
     static const std::string Type;
 
@@ -39,7 +39,11 @@ public:
     ReLU_Op(const ReLU_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<ReLU_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(ReLU_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -52,7 +56,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<ReLU_Op>::create(name)(*this);
+        SET_IMPL_MACRO(ReLU_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -69,4 +73,4 @@ inline std::shared_ptr<Node> ReLU(const std::string& name = "") {
 }
 }
 
-#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 5f07cddfa667e7e494defe38a5667332744c3e20..09f1d58359b265af58fd78ef8de54dd1944b5cf1 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -32,7 +32,7 @@ enum class ReduceMeanAttr { Axes, KeepDims };
 
 template <DimIdx_t DIM>
 class ReduceMean_Op : public OperatorTensor,
-                public Registrable<ReduceMean_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
+                public Registrable<ReduceMean_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
                 public StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t> {
 
    public:
@@ -57,7 +57,11 @@ class ReduceMean_Op : public OperatorTensor,
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<ReduceMean_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -99,7 +103,7 @@ class ReduceMean_Op : public OperatorTensor,
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<ReduceMean_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(ReduceMean_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 32d71d5adc3cfd92c9840dcb5bc61bfb6399c6db..8914bbc9a9f3748276ead32aba8cb023ba14b1b7 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -28,7 +28,7 @@ namespace Aidge {
 enum class ReshapeAttr { Shape };
 
 class Reshape_Op : public OperatorTensor,
-                   public Registrable<Reshape_Op, std::string, std::unique_ptr<OperatorImpl>(const Reshape_Op&)>,
+                   public Registrable<Reshape_Op, std::string, std::shared_ptr<OperatorImpl>(const Reshape_Op&)>,
                    public StaticAttributes<ReshapeAttr, std::vector<std::int64_t>> {
 
 public:
@@ -53,7 +53,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Reshape_Op>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Reshape_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -67,7 +71,7 @@ public:
     void computeOutputDims() override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Reshape_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Reshape_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 54f1d98d2f61d18dd821c9f0a6b574bb52b0c9f0..29ce0527a9b8b15c7b45c0b0241a83957abb5565 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -55,7 +55,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Scaling_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Scaling_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -95,4 +99,4 @@ const char* const EnumStrings<Aidge::ScalingAttr>::data[]
     = {"scalingFactor", "quantizedNbBits", "isOutputUnsigned"};
 }
 
-#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
\ No newline at end of file
+#endif /* __AIDGE_CORE_OPERATOR_RELU_H__ */
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 4a073bc525640846c28d718d09741a67d499830e..363c3c2b4ec397fdd62dc3260b63a0cd6d6c0081 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -28,7 +28,7 @@ enum class SliceAttr { Starts, Ends, Axes };
 
 class Slice_Op
     : public OperatorTensor,
-      public Registrable<Slice_Op, std::string, std::unique_ptr<OperatorImpl>(const Slice_Op &)>,
+      public Registrable<Slice_Op, std::string, std::shared_ptr<OperatorImpl>(const Slice_Op &)>,
       public StaticAttributes<SliceAttr, std::vector<std::int64_t>, std::vector<std::int64_t>, std::vector<std::int64_t>> {
 public:
     static const std::string Type;
@@ -55,8 +55,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Slice_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this)
-                         : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Slice_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
 public:
@@ -69,7 +72,7 @@ public:
     void computeOutputDims() override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Slice_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Slice_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index ed6689dc97ef17276df260cd90649f2a75b10007..943f69a588ebfedf28ec5ebb3a782e7510fa710a 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -33,7 +33,7 @@ enum class SoftmaxAttr { AxisIdx };
 class Softmax_Op : public OperatorTensor,
                 public Registrable<Softmax_Op,
                                    std::string,
-                                   std::unique_ptr<OperatorImpl>(const Softmax_Op&)>,
+                                   std::shared_ptr<OperatorImpl>(const Softmax_Op&)>,
                 public StaticAttributes<SoftmaxAttr, int> {
 
 public:
@@ -55,7 +55,11 @@ public:
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Softmax_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Softmax_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -67,7 +71,7 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Softmax_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Softmax_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index 32adfdb93db1e9da857f4147efdcfe64bbb34475..dd3fa541b9fd5177ddd3b9e8bcd781c0ea3a1867 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -27,7 +27,7 @@
 namespace Aidge {
 
 class Sqrt_Op : public OperatorTensor,
-    public Registrable<Sqrt_Op, std::string, std::unique_ptr<OperatorImpl>(const Sqrt_Op&)> {
+    public Registrable<Sqrt_Op, std::string, std::shared_ptr<OperatorImpl>(const Sqrt_Op&)> {
 public:
     // FIXME: change accessibility
     std::shared_ptr<Tensor> mInput = std::make_shared<Tensor>();
@@ -45,7 +45,11 @@ public:
     Sqrt_Op(const Sqrt_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Sqrt_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Sqrt_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -57,7 +61,7 @@ public:
     }
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Sqrt_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Sqrt_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index 7d346457ead71724ba05da70b5bdf7ad145cbe0c..5683a9be5ea2278d92fe7da081f0c4a80ff9500d 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -27,7 +27,7 @@
 namespace Aidge {
 
 class Sub_Op : public OperatorTensor,
-    public Registrable<Sub_Op, std::string, std::unique_ptr<OperatorImpl>(const Sub_Op&)> {
+    public Registrable<Sub_Op, std::string, std::shared_ptr<OperatorImpl>(const Sub_Op&)> {
 public:
     // FIXME: change accessibility
     std::array<std::shared_ptr<Tensor>, 2> mInputs = {std::make_shared<Tensor>(), std::make_shared<Tensor>()};
@@ -45,7 +45,11 @@ public:
     Sub_Op(const Sub_Op& op)
         : OperatorTensor(op)
     {
-        mImpl = op.mImpl ? Registrar<Sub_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Sub_Op, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -60,7 +64,7 @@ public:
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Sub_Op>::create(name)(*this);
+        SET_IMPL_MACRO(Sub_Op, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
@@ -77,4 +81,4 @@ inline std::shared_ptr<Node> Sub(const std::string& name = "") {
 }
 } // namespace Aidge
 
-#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
\ No newline at end of file
+#endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 2262bec14bd2f00cda643ade0709f7f9d509fa22..b040fc907dd5ac1f40a8a1885d27364785ba9188 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -30,7 +30,7 @@ enum class TransposeAttr { OutputDimsOrder };
 
 template <DimIdx_t DIM>
 class Transpose_Op : public OperatorTensor,
-                public Registrable<Transpose_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
+                public Registrable<Transpose_Op<DIM>, std::string, std::shared_ptr<OperatorImpl>(const Transpose_Op<DIM> &)>,
                 public StaticAttributes<TransposeAttr,
                                        std::array<DimSize_t, DIM>> {
 
@@ -56,7 +56,11 @@ class Transpose_Op : public OperatorTensor,
         : OperatorTensor(op),
           Attributes_(op)
     {
-        mImpl = op.mImpl ? Registrar<Transpose_Op<DIM>>::create(mOutputs[0]->getImpl()->backend())(*this) : nullptr;
+        if (op.mImpl) {
+            SET_IMPL_MACRO(Transpose_Op<DIM>, *this, op.mOutputs[0]->getImpl()->backend());
+        } else {
+            mImpl = nullptr;
+        }
     }
 
     /**
@@ -80,7 +84,7 @@ class Transpose_Op : public OperatorTensor,
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
-        mImpl = Registrar<Transpose_Op<DIM>>::create(name)(*this);
+        SET_IMPL_MACRO(Transpose_Op<DIM>, *this, name);
         mOutputs[0]->setBackend(name, device);
     }
 
diff --git a/include/aidge/utils/ErrorHandling.hpp b/include/aidge/utils/ErrorHandling.hpp
index 653a774b92e26513c9ac555e0aec1daed793e208..d4235d2db9b06597df80966e67306d84ac814a3c 100644
--- a/include/aidge/utils/ErrorHandling.hpp
+++ b/include/aidge/utils/ErrorHandling.hpp
@@ -18,13 +18,15 @@
 #include <fmt/format.h>
 #include <fmt/ranges.h>
 
+#include "aidge/utils/Log.hpp"
+
 #ifdef NO_EXCEPTION
 #define AIDGE_THROW_OR_ABORT(ex, ...) \
-do { fmt::print(__VA_ARGS__); std::abort(); } while (false)
+do { Aidge::Log::fatal(__VA_ARGS__); std::abort(); } while (false)
 #else
 #include <stdexcept>
 #define AIDGE_THROW_OR_ABORT(ex, ...) \
-throw ex(fmt::format(__VA_ARGS__))
+do { Aidge::Log::fatal(__VA_ARGS__); throw ex(fmt::format(__VA_ARGS__)); } while (false)
 #endif
 
 /**
@@ -33,7 +35,7 @@ throw ex(fmt::format(__VA_ARGS__))
  * If it asserts, it means an user error.
 */
 #define AIDGE_ASSERT(stm, ...) \
-if (!(stm)) { fmt::print("Assertion failed: " #stm " in {}:{}", __FILE__, __LINE__); \
+if (!(stm)) { Aidge::Log::error("Assertion failed: " #stm " in {}:{}", __FILE__, __LINE__); \
     AIDGE_THROW_OR_ABORT(std::runtime_error, __VA_ARGS__); }
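+// Illustrative use, mirroring the GraphView change above:
+//   AIDGE_ASSERT(mNodeRegistry.find(nodeName) != mNodeRegistry.end(),
+//                "No node named {} in graph {}.", nodeName, name());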
 
 /**
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..8a18bbab34d3c1c86252833852abc5faca41dd96
--- /dev/null
+++ b/include/aidge/utils/Log.hpp
@@ -0,0 +1,148 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+
+#ifndef AIDGE_LOG_H_
+#define AIDGE_LOG_H_
+
+#include <cstdio>  // FILE, std::fclose
+#include <memory>
+#include <string>
+
+#include <fmt/format.h>
+#include <fmt/ranges.h>
+
+namespace Aidge {
+/**
+ * Aidge logging class, for displaying and file logging of events.
+*/
+class Log {
+public:
+    enum Level {
+        Debug = 0,
+        Info,
+        Notice,
+        Warn,
+        Error,
+        Fatal
+    };
+
+    /**
+     * Detailed messages for debugging purposes, providing information helpful
+     * for developers to trace and identify issues.
+     * Detailed insight into what is happening in an operation, not useful for
+     * the end-user. The operation is performed nominally.
+     * @note This level is disabled at compile time in Release builds and
+     * therefore induces no runtime overhead.
+    */
+    template <typename... Args>
+    constexpr static void debug(Args&&... args) {
+#ifndef NDEBUG
+        // only when compiled in Debug
+        log(Debug, fmt::format(std::forward<Args>(args)...));
+#endif
+    }
+
+    /**
+     * Messages that provide a record of normal operation: the application's
+     * state, progress, or important events.
+     * Reports normal start, end and key steps in an operation. The operation is
+     * performed nominally.
+    */
+    template <typename... Args>
+    constexpr static void info(Args&&... args) {
+        log(Info, fmt::format(std::forward<Args>(args)...));
+    }
+
+    /**
+     * Applies to normal but significant conditions that may require monitoring,
+     * like unusual or normal fallback events.
+     * Reports specific paths in an operation. The operation can still be
+     * performed normally.
+    */
+    template <typename... Args>
+    constexpr static void notice(Args&&... args) {
+        log(Notice, fmt::format(std::forward<Args>(args)...));
+    }
+
+    /**
+     * Indicates potential issues or situations that may lead to errors but do
+     * not necessarily cause immediate problems.
+     * Some specific steps of the operation could not be performed, but it can
+     * still provide an exploitable result.
+    */
+    template <typename... Args>
+    constexpr static void warn(Args&&... args) {
+        log(Warn, fmt::format(std::forward<Args>(args)...));
+    }
+
+    /**
+     * Signifies a problem or unexpected condition that the application can 
+     * recover from, but attention is needed to prevent further issues.
+     * The operation could not be performed, but it does not prevent potential
+     * further operations.
+    */
+    template <typename... Args>
+    constexpr static void error(Args&&... args) {
+        log(Error, fmt::format(std::forward<Args>(args)...));
+    }
+
+    /**
+     * Represents a critical error or condition that leads to the termination of
+     * the application, indicating a severe and unrecoverable problem.
+     * The operation could not be performed and any further operation is
+     * impossible.
+    */
+    template <typename... Args>
+    constexpr static void fatal(Args&&... args) {
+        log(Fatal, fmt::format(std::forward<Args>(args)...));
+    }
+
+    /**
+     * Set the minimum log level displayed in the console.
+    */
+    constexpr static void setConsoleLevel(Level level) {
+        mConsoleLevel = level;
+    }
+
+    /**
+     * Set the minimum log level saved in the log file.
+    */
+    constexpr static void setFileLevel(Level level) {
+        mFileLevel = level;
+    }
+
+    /**
+     * Set the log file name.
+     * Close the current log file and open the one with the new file name.
+     * If empty, stop logging into a file.
+    */
+    static void setFileName(const std::string& fileName) {
+        if (fileName != mFileName) {
+            mFileName = fileName;
+            mFile.reset(); // actually close the current file (release() would leak the handle)
+
+            if (!fileName.empty()) {
+                initFile(fileName);
+            }
+        }
+    }
+
+private:
+    static void log(Level level, const std::string& msg);
+    static void initFile(const std::string& fileName);
+
+    static Level mConsoleLevel;
+    static Level mFileLevel;
+    static std::string mFileName;
+    static std::unique_ptr<FILE, decltype(&std::fclose)> mFile;
+};
+}
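+
+// Usage sketch (illustrative only; names other than the Log API are assumptions):
+//   Aidge::Log::setConsoleLevel(Aidge::Log::Notice);
+//   Aidge::Log::setFileName("aidge.log");
+//   Aidge::Log::warn("Fallback to backend {}", backendName);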
+
+#endif //AIDGE_LOG_H_
diff --git a/include/aidge/utils/Registrar.hpp b/include/aidge/utils/Registrar.hpp
index 4d604d520d3d8af532e196c7785896ddc1c242d0..a5bd260ec189ac998134b738ca1ae757f2a0038c 100644
--- a/include/aidge/utils/Registrar.hpp
+++ b/include/aidge/utils/Registrar.hpp
@@ -14,6 +14,9 @@
 
 #ifdef PYBIND
 #include <pybind11/pybind11.h>
+#include <pybind11/stl.h>        // declare_registrable keys may require STL types
+#include <pybind11/functional.h> // declare_registrable allows binding lambda functions
+
 #endif
 
 #include "aidge/utils/ErrorHandling.hpp"
@@ -27,6 +30,9 @@ namespace Aidge {
 namespace py = pybind11;
 #endif
 
+// Abstract class used to test if a class is Registrable.
+class AbstractRegistrable {};
+
 template <class DerivedClass, class Key, class Func> // curiously rucurring template pattern
 class Registrable {
 public:
@@ -58,8 +64,10 @@ struct Registrar {
 
     Registrar(const registrar_key& key, registrar_type func) {
         //fmt::print("REGISTRAR: {}\n", key);
-        bool newInsert;
-        std::tie(std::ignore, newInsert) = C::registry().insert(std::make_pair(key, func));
+        // Note: overwrite the previous registration, if any, so that a key
+        // can be re-registered (e.g. an implementation overridden from Python).
+        C::registry().erase(key);
+        C::registry().insert(std::make_pair(key, func));
         //assert(newInsert && "registrar already exists");
     }
 
@@ -81,6 +89,61 @@ struct Registrar {
         return keys;
     }
 };
+
+#ifdef PYBIND
+/**
+ * @brief Defines the register_ and get_keys_ functions of a registrable class.
+ * Defined here so that every module that wants to expose a new registrable
+ * class has access to it.
+ *
+ * @tparam C registrable class
+ * @param m pybind module
+ * @param class_name python name of the class
+ */
+template <class C>
+void declare_registrable(py::module& m, const std::string& class_name){
+    typedef typename C::registrar_key registrar_key;
+    typedef typename C::registrar_type registrar_type;
+    m.def(("register_"+ class_name).c_str(), [](registrar_key& key, registrar_type function){
+        Registrar<C>(key, function);
+    })
+    .def(("get_keys_"+ class_name).c_str(), [](){
+        return Registrar<C>::getKeys();
+    });
+}
+#endif
+
+/*
+* This macro sets an implementation on an operator.
+* It is mandatory in order to use an implementation registered from Python.
+* When calling the create method, PyBind will invoke the copy ctor if op is
+* not visible to the Python world (i.e. if create returns a Python function).
+* See this issue for more information: https://github.com/pybind/pybind11/issues/4417
+* Note: using a function to do this is not possible, as any call to a function
+* would invoke the copy ctor. This is why a macro is used.
+* Note: the line
+*             (op).setImpl(Registrar<T_Op>::create(backend_name)(op)); \
+* is duplicated because the py::cast needs to happen in the same scope.
+* This is only known empirically; what happens under the hood is unclear...
+*
+* If someone wants to find an alternative to this macro, you can contact me:
+*   cyril.moineau@cea.fr
+*/
+#ifdef PYBIND
+#define SET_IMPL_MACRO(T_Op, op, backend_name) \
+        if(Py_IsInitialized()) { \
+            auto obj = py::cast(&(op)); \
+            (op).setImpl(Registrar<T_Op>::create(backend_name)(op)); \
+        } else { \
+            (op).setImpl(Registrar<T_Op>::create(backend_name)(op)); \
+        }
+#else
+#define SET_IMPL_MACRO(T_Op, op, backend_name)                          \
+    if (Registrar<T_Op>::exists(backend_name)) {                        \
+        (op).setImpl(Registrar<T_Op>::create(backend_name)(op));        \
+    }
+#endif
+
 }
 
 #endif //AIDGE_CORE_UTILS_REGISTRAR_H_
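
Review note: the erase()/insert() change above makes registration last-write-wins, which is what lets a Python implementation override a C++ one registered under the same key. A self-contained sketch of the new semantics, assuming Registrable wraps Func in std::function as in aidge_core (the Greeter class is hypothetical):

```cpp
#include <string>
#include "aidge/utils/Registrar.hpp"

// Hypothetical registrable: a string-keyed factory of greetings.
class Greeter : public Aidge::Registrable<Greeter, std::string, std::string()> {};

// First registration for the "en" key...
static Aidge::Registrar<Greeter> enReg("en", []() { return std::string("hello"); });
// ...and a second one: it now replaces enReg instead of tripping the old
// "registrar already exists" assert.
static Aidge::Registrar<Greeter> enOverride("en", []() { return std::string("hi"); });

// Registrar<Greeter>::create("en")() now returns "hi".
```

SET_IMPL_MACRO builds on the same lookup, Registrar<T_Op>::create(); its PYBIND variant additionally performs the py::cast() of the operator in the same scope, so that a factory implemented in Python receives a valid handle.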
diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp
index a2a5e6b8bb2d0f2413ef94c360b383608c5b41b5..91d65484a122d6a651758e16eb0e925b6e0bfdd0 100644
--- a/python_binding/backend/pybind_OperatorImpl.cpp
+++ b/python_binding/backend/pybind_OperatorImpl.cpp
@@ -116,7 +116,7 @@ public:
 void init_OperatorImpl(py::module& m){
 
     py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr())
-    .def(py::init<const Operator&>())
+    .def(py::init<const Operator&>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>())
     .def("forward", &OperatorImpl::forward)
     .def("backward", &OperatorImpl::backward)
     .def("get_nb_required_data", &OperatorImpl::getNbRequiredData)
diff --git a/python_binding/operator/pybind_Add.cpp b/python_binding/operator/pybind_Add.cpp
index 74ec11c28e746856fe767f16a4380651271d8fe4..661c96bb835fa3ac719ab10dbf83e4137f1bb248 100644
--- a/python_binding/operator/pybind_Add.cpp
+++ b/python_binding/operator/pybind_Add.cpp
@@ -23,7 +23,7 @@ void declare_Add(py::module &m) {
   py::class_<Add_Op, std::shared_ptr<Add_Op>, OperatorTensor>(m, "AddOp", py::multiple_inheritance())
   .def("get_inputs_name", &Add_Op::getInputsName)
   .def("get_outputs_name", &Add_Op::getOutputsName);
-
+  declare_registrable<Add_Op>(m, "AddOp");
   m.def("Add", &Add, py::arg("nbIn"), py::arg("name") = "");
 }
 
diff --git a/python_binding/operator/pybind_AvgPooling.cpp b/python_binding/operator/pybind_AvgPooling.cpp
index 0ca01c07535f65ac1161603d32d191881eb28746..c44c7b49ade1e47438f80f0b3f3a83c18eb4e0fa 100644
--- a/python_binding/operator/pybind_AvgPooling.cpp
+++ b/python_binding/operator/pybind_AvgPooling.cpp
@@ -26,8 +26,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
+  const std::string pyClassName("AvgPoolingOp" + std::to_string(DIM) + "D");
   py::class_<AvgPooling_Op<DIM>, std::shared_ptr<AvgPooling_Op<DIM>>, Attributes, OperatorTensor>(
-    m, ("AvgPoolingOp" + std::to_string(DIM) + "D").c_str(),
+    m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, DIM> &,
                 const std::array<DimSize_t, DIM> &>(),
@@ -36,7 +37,7 @@ template <DimIdx_t DIM> void declare_AvgPoolingOp(py::module &m) {
   .def("get_inputs_name", &AvgPooling_Op<DIM>::getInputsName)
   .def("get_outputs_name", &AvgPooling_Op<DIM>::getOutputsName)
   .def("attributes_name", &AvgPooling_Op<DIM>::staticGetAttrsName);
-
+  declare_registrable<AvgPooling_Op<DIM>>(m, pyClassName);
   m.def(("AvgPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims) {
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index e11fc288fb9eb837c0a7b36c0a1c4024ab6c8633..7020c35f63880e77ecd3c2011a1b3c74bed847ed 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -21,13 +21,12 @@ namespace Aidge {
 
 template <DimSize_t DIM>
 void declare_BatchNormOp(py::module& m) {
-    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, ("BatchNormOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
-    .def(py::init<float, float>(),
-            py::arg("epsilon"),
-            py::arg("momentum"))
+    const std::string pyClassName("BatchNormOp" + std::to_string(DIM) + "D");
+    py::class_<BatchNorm_Op<DIM>, std::shared_ptr<BatchNorm_Op<DIM>>, Attributes, OperatorTensor>(m, pyClassName.c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &BatchNorm_Op<DIM>::getInputsName)
     .def("get_outputs_name", &BatchNorm_Op<DIM>::getOutputsName)
     .def("attributes_name", &BatchNorm_Op<DIM>::staticGetAttrsName);
+    declare_registrable<BatchNorm_Op<DIM>>(m, pyClassName);
 
     m.def(("BatchNorm" + std::to_string(DIM) + "D").c_str(), &BatchNorm<DIM>, py::arg("nbFeatures"), py::arg("epsilon") = 1.0e-5F, py::arg("momentum") = 0.1F, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 8cdd138b8cde2a582e9f569a17ae33811637092c..38d8a20cba1eafc255b1da313d35ad8be116620d 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -24,6 +24,7 @@ void init_Concat(py::module& m) {
     .def("get_outputs_name", &Concat_Op::getOutputsName)
     .def("attributes_name", &Concat_Op::staticGetAttrsName);
 
+    declare_registrable<Concat_Op>(m, "ConcatOp");
     m.def("Concat", &Concat, py::arg("nbIn"), py::arg("axis"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Conv.cpp b/python_binding/operator/pybind_Conv.cpp
index 346acc5d9d05c24e9538c3b8c5edf1f7e37d6ba8..aea402017622655a577ac4f9e207141bff01d70d 100644
--- a/python_binding/operator/pybind_Conv.cpp
+++ b/python_binding/operator/pybind_Conv.cpp
@@ -19,13 +19,15 @@
 #include "aidge/operator/Conv.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Types.h"
+#include "aidge/utils/Registrar.hpp" // declare_registrable
 
 namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
+  const std::string pyClassName("ConvOp" + std::to_string(DIM) + "D");
   py::class_<Conv_Op<DIM>, std::shared_ptr<Conv_Op<DIM>>, Attributes, OperatorTensor>(
-    m, ("ConvOp" + std::to_string(DIM) + "D").c_str(),
+    m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<DimSize_t,
                 DimSize_t,
@@ -41,6 +43,8 @@ template <DimIdx_t DIM> void declare_ConvOp(py::module &m) {
     .def("get_outputs_name", &Conv_Op<DIM>::getOutputsName)
     .def("attributes_name", &Conv_Op<DIM>::staticGetAttrsName)
     ;
+  declare_registrable<Conv_Op<DIM>>(m, pyClassName);
+
 
   m.def(("Conv" + std::to_string(DIM) + "D").c_str(), [](DimSize_t in_channels,
                                                          DimSize_t out_channels,
@@ -66,9 +70,5 @@ void init_Conv(py::module &m) {
   declare_ConvOp<1>(m);
   declare_ConvOp<2>(m);
   declare_ConvOp<3>(m);
-
-  // FIXME:
-  // m.def("Conv1D", static_cast<NodeAPI(*)(const char*, int, int, int const
-  // (&)[1])>(&Conv));
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_ConvDepthWise.cpp b/python_binding/operator/pybind_ConvDepthWise.cpp
index e25024e09cdd4fe234416a9aa8f0fef91a3c27fe..83eac8742628bf2e0921e6a17dd46226c46fbea1 100644
--- a/python_binding/operator/pybind_ConvDepthWise.cpp
+++ b/python_binding/operator/pybind_ConvDepthWise.cpp
@@ -26,8 +26,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
+  const std::string pyClassName("ConvDepthWiseOp" + std::to_string(DIM) + "D");
   py::class_<ConvDepthWise_Op<DIM>, std::shared_ptr<ConvDepthWise_Op<DIM>>, Attributes, OperatorTensor>(
-    m, ("ConvDepthWiseOp" + std::to_string(DIM) + "D").c_str(),
+    m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const DimSize_t,
                 const std::array<DimSize_t, DIM> &,
@@ -40,7 +41,7 @@ template <DimIdx_t DIM> void declare_ConvDepthWiseOp(py::module &m) {
   .def("get_inputs_name", &ConvDepthWise_Op<DIM>::getInputsName)
   .def("get_outputs_name", &ConvDepthWise_Op<DIM>::getOutputsName)
   .def("attributes_name", &ConvDepthWise_Op<DIM>::staticGetAttrsName);
-
+  declare_registrable<ConvDepthWise_Op<DIM>>(m, pyClassName);
   m.def(("ConvDepthWise" + std::to_string(DIM) + "D").c_str(), [](const DimSize_t nb_channels,
                                                                   const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
index 6d14510f34349c001289096a7fc9b08681a25bc8..2996e0bcae6d69d9ad2ef0d4d8eee8489cd8cdc8 100644
--- a/python_binding/operator/pybind_Div.cpp
+++ b/python_binding/operator/pybind_Div.cpp
@@ -21,7 +21,7 @@ void init_Div(py::module& m) {
     py::class_<Div_Op, std::shared_ptr<Div_Op>, OperatorTensor>(m, "DivOp", py::multiple_inheritance())
     .def("get_inputs_name", &Div_Op::getInputsName)
     .def("get_outputs_name", &Div_Op::getOutputsName);
-
+    declare_registrable<Div_Op>(m, "DivOp");
     m.def("Div", &Div, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp
index 806867f61c3580543c184d529edc2856ee8d7a6c..e1aef08ad597d92c4cf4b6d5a2cff487e438538e 100644
--- a/python_binding/operator/pybind_Erf.cpp
+++ b/python_binding/operator/pybind_Erf.cpp
@@ -21,7 +21,7 @@ void init_Erf(py::module& m) {
     py::class_<Erf_Op, std::shared_ptr<Erf_Op>, OperatorTensor>(m, "ErfOp", py::multiple_inheritance())
     .def("get_inputs_name", &Erf_Op::getInputsName)
     .def("get_outputs_name", &Erf_Op::getOutputsName);
-
+    declare_registrable<Erf_Op>(m, "ErfOp");
     m.def("Erf", &Erf, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_FC.cpp b/python_binding/operator/pybind_FC.cpp
index ad589d73d0aea94d96e62e8065b70bd517633f88..0b13643cbd3ebb265dab62a1030729fca62dea62 100644
--- a/python_binding/operator/pybind_FC.cpp
+++ b/python_binding/operator/pybind_FC.cpp
@@ -24,7 +24,7 @@ void declare_FC(py::module &m) {
   .def("get_inputs_name", &FC_Op::getInputsName)
   .def("get_outputs_name", &FC_Op::getOutputsName)
   .def("attributes_name", &FC_Op::staticGetAttrsName);
-
+  declare_registrable<FC_Op>(m, "FCOp");
   m.def("FC", &FC, py::arg("in_channels"), py::arg("out_channels"), py::arg("nobias") = false, py::arg("name") = "");
 }
 
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index f0d55e2f40bd89269c96564cea6b5a002b477b8b..db6bdb15a2e6288b5f775d538a5e14f15d79d2c1 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -23,7 +23,7 @@ void init_Gather(py::module& m) {
     .def("get_inputs_name", &Gather_Op::getInputsName)
     .def("get_outputs_name", &Gather_Op::getOutputsName)
     .def("attributes_name", &Gather_Op::staticGetAttrsName);
-
-    m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis"), py::arg("name") = "");
+    declare_registrable<Gather_Op>(m, "GatherOp");
+    m.def("Gather", &Gather, py::arg("indices"), py::arg("gathered_shape"), py::arg("axis")= 0, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index 3e9acb831eb3334bd126d3b360f3b5aa39d83731..66b2c34a9a558d20d90f71dd590d9fe8c370c10d 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -22,7 +22,7 @@ void init_LeakyReLU(py::module& m) {
     .def("get_inputs_name", &LeakyReLU_Op::getInputsName)
     .def("get_outputs_name", &LeakyReLU_Op::getOutputsName)
     .def("attributes_name", &LeakyReLU_Op::staticGetAttrsName);
-
+    declare_registrable<LeakyReLU_Op>(m, "LeakyReLUOp");
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index d0d7f28d52a9a9899b08d37a0c1a4a8720f2ae20..383bad54be08905c5e9248ab3f7bf5c83bddc836 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -23,7 +23,7 @@ void init_MatMul(py::module &m) {
   py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
   .def("get_inputs_name", &MatMul_Op::getInputsName)
   .def("get_outputs_name", &MatMul_Op::getOutputsName);
-
+  declare_registrable<MatMul_Op>(m, "MatMulOp");
   m.def("MatMul", &MatMul, py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/operator/pybind_MaxPooling.cpp b/python_binding/operator/pybind_MaxPooling.cpp
index 9c83a67e81120e2cc2674e3ceb4c8871dd6fd393..8a5e3db9decd01bd5fabe5897847f939e7fa02b3 100644
--- a/python_binding/operator/pybind_MaxPooling.cpp
+++ b/python_binding/operator/pybind_MaxPooling.cpp
@@ -26,6 +26,7 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
+  const std::string pyClassName("MaxPoolingOp" + std::to_string(DIM) + "D");
   py::class_<MaxPooling_Op<DIM>, std::shared_ptr<MaxPooling_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("MaxPoolingOp" + std::to_string(DIM) + "D").c_str(),
     py::multiple_inheritance())
@@ -38,7 +39,7 @@ template <DimIdx_t DIM> void declare_MaxPoolingOp(py::module &m) {
   .def("get_inputs_name", &MaxPooling_Op<DIM>::getInputsName)
   .def("get_outputs_name", &MaxPooling_Op<DIM>::getOutputsName)
   .def("attributes_name", &MaxPooling_Op<DIM>::staticGetAttrsName);
-
+  declare_registrable<MaxPooling_Op<DIM>>(m, pyClassName);
   m.def(("MaxPooling" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& kernel_dims,
                                                                   const std::string& name,
                                                                   const std::vector<DimSize_t> &stride_dims,
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
index 21f510d98728fbe5401288a366294241b5f10a3f..5354f01ca508eb6ff04304d1f4072f431339973c 100644
--- a/python_binding/operator/pybind_Mul.cpp
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -21,7 +21,7 @@ void init_Mul(py::module& m) {
     py::class_<Mul_Op, std::shared_ptr<Mul_Op>, OperatorTensor>(m, "MulOp", py::multiple_inheritance())
     .def("get_inputs_name", &Mul_Op::getInputsName)
     .def("get_outputs_name", &Mul_Op::getOutputsName);
-
+    declare_registrable<Mul_Op>(m, "MulOp");
     m.def("Mul", &Mul, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index 79a85cb92cf27c7edb745c36eefe61ae86c66786..05d6cd089754d1155e1506b4a491af7919bc4d31 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -32,10 +32,11 @@ void init_Operator(py::module& m){
     .def("set_datatype", &Operator::setDataType, py::arg("dataType"))
     .def("set_backend", &Operator::setBackend, py::arg("name"), py::arg("device") = 0)
     .def("forward", &Operator::forward)
-    // py::keep_alive forbide Python to garbage collect implementation will the Operator is not garbade collected !
+    // py::keep_alive prevents Python from garbage collecting the implementation as long as the Operator is alive!
     .def("set_impl", &Operator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>())
+    .def("get_impl", &Operator::getImpl)
     .def("get_hook", &Operator::getHook)
     .def("add_hook", &Operator::addHook)
     ;
 }
-}
\ No newline at end of file
+}
diff --git a/python_binding/operator/pybind_Pad.cpp b/python_binding/operator/pybind_Pad.cpp
index 69d63fe7b8d31a6fa9747df2ce4a93ec4a0f4cac..d784a0d6ab7803bbc078b12b39df9ad8ef2f768e 100644
--- a/python_binding/operator/pybind_Pad.cpp
+++ b/python_binding/operator/pybind_Pad.cpp
@@ -25,8 +25,9 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
+  const std::string pyClassName("PadOp" + std::to_string(DIM) + "D");
   py::class_<Pad_Op<DIM>, std::shared_ptr<Pad_Op<DIM>>, Attributes, Operator>(
-    m, ("PadOp" + std::to_string(DIM) + "D").c_str(),
+    m, pyClassName.c_str(),
     py::multiple_inheritance())
   .def(py::init<const std::array<DimSize_t, 2*DIM> &,
                 const PadBorderType &,
@@ -38,7 +39,7 @@ template <DimIdx_t DIM> void declare_PadOp(py::module &m) {
     .def("get_outputs_name", &Pad_Op<DIM>::getOutputsName)
     .def("attributes_name", &Pad_Op<DIM>::staticGetAttrsName)
     ;
-
+  declare_registrable<Pad_Op<DIM>>(m, pyClassName);
   m.def(("Pad" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& beginEndTuples,
                                                         const std::string& name,
                                                         const PadBorderType &borderType = PadBorderType::Constant,
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
index 09d1e4ad2ad6413901c28bc9d9fe16995483da05..03e822adbd326b6ad9693d58b53cd9f8f4bc3ac8 100644
--- a/python_binding/operator/pybind_Pow.cpp
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -21,6 +21,7 @@ void init_Pow(py::module& m) {
     py::class_<Pow_Op, std::shared_ptr<Pow_Op>, OperatorTensor>(m, "PowOp", py::multiple_inheritance())
     .def("get_inputs_name", &Pow_Op::getInputsName)
     .def("get_outputs_name", &Pow_Op::getOutputsName);
+    declare_registrable<Pow_Op>(m, "PowOp");
 
     m.def("Pow", &Pow, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_Producer.cpp b/python_binding/operator/pybind_Producer.cpp
index 3caa438d18b3919dbedcf66e4ba53b92b84a50b5..025c8c5dd1651b3466a22e88f0966a7f51d2c109 100644
--- a/python_binding/operator/pybind_Producer.cpp
+++ b/python_binding/operator/pybind_Producer.cpp
@@ -39,7 +39,7 @@ void init_Producer(py::module &m) {
     .def("get_outputs_name", &Producer_Op::getOutputsName)
     .def("attributes_name", &Producer_Op::staticGetAttrsName);
     m.def("Producer", static_cast<std::shared_ptr<Node>(*)(const std::shared_ptr<Tensor>, const std::string&, bool)>(&Producer), py::arg("tensor"), py::arg("name") = "", py::arg("constant") = false);
-
+    declare_registrable<Producer_Op>(m, "ProducerOp");
     declare_Producer<1>(m);
     declare_Producer<2>(m);
     declare_Producer<3>(m);
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index 24ae96649a87ff9acc996715d3cd00a97c393578..f08c67cb98b629b8d1b61471c6f50a0de4c421d6 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -21,6 +21,7 @@ void init_ReLU(py::module& m) {
     py::class_<ReLU_Op, std::shared_ptr<ReLU_Op>, OperatorTensor>(m, "ReLUOp", py::multiple_inheritance())
     .def("get_inputs_name", &ReLU_Op::getInputsName)
     .def("get_outputs_name", &ReLU_Op::getOutputsName);
+    declare_registrable<ReLU_Op>(m, "ReLUOp");
 
     m.def("ReLU", &ReLU, py::arg("name") = "");
 }
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 11e979736dcab211aa11758cb3138f9d6827cc4e..fbec6864042cf16a877faa67b351be5eb3f9b1eb 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -24,12 +24,14 @@ namespace py = pybind11;
 namespace Aidge {
 
 template <DimIdx_t DIM> void declare_ReduceMeanOp(py::module &m) {
+  const std::string pyClassName("ReduceMeanOp" + std::to_string(DIM) + "D");
   py::class_<ReduceMean_Op<DIM>, std::shared_ptr<ReduceMean_Op<DIM>>, Attributes, OperatorTensor>(
-    m, ("ReduceMeanOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
+    m, pyClassName.c_str(), py::multiple_inheritance())
     .def("get_inputs_name", &ReduceMean_Op<DIM>::getInputsName)
     .def("get_outputs_name", &ReduceMean_Op<DIM>::getOutputsName)
     .def("attributes_name", &ReduceMean_Op<DIM>::staticGetAttrsName)
     ;
+  declare_registrable<ReduceMean_Op<DIM>>(m, pyClassName);
 
   m.def(("ReduceMean" + std::to_string(DIM) + "D").c_str(), [](const std::vector<int>& axes,
                                                                 DimSize_t keepDims,
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index b3e9850a54a36e440876dace2b635a122c63b4af..dc6a9b4ec5de297df7c1c52877974ab84d55a0c2 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -21,7 +21,7 @@ void init_Reshape(py::module& m) {
     py::class_<Reshape_Op, std::shared_ptr<Reshape_Op>, Attributes, OperatorTensor>(m, "ReshapeOp", py::multiple_inheritance())
     .def("get_inputs_name", &Reshape_Op::getInputsName)
     .def("get_outputs_name", &Reshape_Op::getOutputsName);
-
+    declare_registrable<Reshape_Op>(m, "ReshapeOp");
     m.def("Reshape", &Reshape, py::arg("shape"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index 7bfd1b4f00579ed29658db73b71f2c596048fe75..3bb1b082c19b98447726b0fb980cbd8688fd5ba3 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -21,7 +21,7 @@ void init_Slice(py::module& m) {
     py::class_<Slice_Op, std::shared_ptr<Slice_Op>, OperatorTensor>(m, "SliceOp", py::multiple_inheritance())
     .def("get_inputs_name", &Slice_Op::getInputsName)
     .def("get_outputs_name", &Slice_Op::getOutputsName);
-
+    declare_registrable<Slice_Op>(m, "SliceOp");
     m.def("Slice", &Slice, py::arg("starts"), py::arg("ends"), py::arg("axes"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 780cffdef695b71dbc2781ba30936b3b45657cbb..bac553387a00856f2d4e01dea95e630a59666938 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -23,7 +23,7 @@ void init_Softmax(py::module& m) {
     .def("get_inputs_name", &Softmax_Op::getInputsName)
     .def("get_outputs_name", &Softmax_Op::getOutputsName)
     .def("attributes_name", &Softmax_Op::staticGetAttrsName);
-
+    declare_registrable<Softmax_Op>(m, "SoftmaxOp");
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index 98d65242e8ff199992bbfc740192ae25e6d7b738..33d46e02caee1046cbbdbaaa186c4898db5b10c1 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -21,7 +21,7 @@ void init_Sqrt(py::module& m) {
     py::class_<Sqrt_Op, std::shared_ptr<Sqrt_Op>, OperatorTensor>(m, "SqrtOp", py::multiple_inheritance())
     .def("get_inputs_name", &Sqrt_Op::getInputsName)
     .def("get_outputs_name", &Sqrt_Op::getOutputsName);
-
+    declare_registrable<Sqrt_Op>(m, "SqrtOp");
     m.def("Sqrt", &Sqrt, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
index dce1ab6cb27cc7da02e6c817a6bc49ec64bcf364..1b858d1527eb3969e2acad9c0206311ff2981f17 100644
--- a/python_binding/operator/pybind_Sub.cpp
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -21,7 +21,7 @@ void init_Sub(py::module& m) {
     py::class_<Sub_Op, std::shared_ptr<Sub_Op>, OperatorTensor>(m, "SubOp", py::multiple_inheritance())
     .def("get_inputs_name", &Sub_Op::getInputsName)
     .def("get_outputs_name", &Sub_Op::getOutputsName);
-
+    declare_registrable<Sub_Op>(m, "SubOp");
     m.def("Sub", &Sub, py::arg("name") = "");
 }
 }  // namespace Aidge
diff --git a/python_binding/operator/pybind_Transpose.cpp b/python_binding/operator/pybind_Transpose.cpp
index f5fbaf0e75ddd81265fd17e0aeb18b54f3908627..59482cf481849738ed0656d8c55188b2ade51954 100644
--- a/python_binding/operator/pybind_Transpose.cpp
+++ b/python_binding/operator/pybind_Transpose.cpp
@@ -27,12 +27,15 @@ namespace Aidge {
 
 template <DimIdx_t DIM>
 void declare_Transpose(py::module &m) {
+  const std::string pyClassName("TransposeOp" + std::to_string(DIM) + "D");
   py::class_<Transpose_Op<DIM>, std::shared_ptr<Transpose_Op<DIM>>, Attributes, OperatorTensor>(
     m, ("TransposeOp" + std::to_string(DIM) + "D").c_str(), py::multiple_inheritance())
   .def("get_inputs_name", &Transpose_Op<DIM>::getInputsName)
   .def("get_outputs_name", &Transpose_Op<DIM>::getOutputsName)
   .def("attributes_name", &Transpose_Op<DIM>::staticGetAttrsName);
 
+  declare_registrable<Transpose_Op<DIM>>(m, pyClassName);
+
   m.def(("Transpose" + std::to_string(DIM) + "D").c_str(), [](const std::vector<DimSize_t>& output_dims_order,
                                                                   const std::string& name) {
         AIDGE_ASSERT(output_dims_order.size() == DIM, "output_dims_order size [{}] does not match DIM [{}]", output_dims_order.size(), DIM);
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index 6c4dd29dfbb158774ea86b181503e7e7e718bda4..52863735ca431e797fab3426d7e61796a8725dd2 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -23,6 +23,7 @@ void init_DataProvider(py::module&);
 void init_Tensor(py::module&);
 void init_OperatorImpl(py::module&);
 void init_Attributes(py::module&);
+void init_Log(py::module&);
 void init_Operator(py::module&);
 void init_OperatorTensor(py::module&);
 
@@ -85,6 +86,7 @@ void init_Aidge(py::module& m){
 
     init_OperatorImpl(m);
     init_Attributes(m);
+    init_Log(m);
     init_Operator(m);
     init_OperatorTensor(m);
     init_Add(m);
diff --git a/python_binding/utils/pybind_Log.cpp b/python_binding/utils/pybind_Log.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..10a02dcafefe089c8836ee7d4e3a9783a2aa96a6
--- /dev/null
+++ b/python_binding/utils/pybind_Log.cpp
@@ -0,0 +1,103 @@
+#include <pybind11/pybind11.h>
+#include "aidge/utils/Log.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+void init_Log(py::module& m){
+    py::enum_<Log::Level>(m, "Level")
+        .value("Debug", Log::Debug)
+        .value("Info", Log::Info)
+        .value("Notice", Log::Notice)
+        .value("Warn", Log::Warn)
+        .value("Error", Log::Error)
+        .value("Fatal", Log::Fatal);
+
+    py::class_<Log>(m, "Log")
+    .def_static("debug", [](const std::string& msg) { Log::debug(msg); }, py::arg("msg"),
+          R"mydelimiter(
+          Detailed messages for debugging purposes, providing information helpful 
+          for developers to trace and identify issues.
+          Detailed insight into what is happening in an operation; not useful
+          for the end-user. The operation is performed nominally.
+          Note: This level is disabled at compile time for Release, therefore
+          inducing no runtime overhead for Release.
+
+          :param msg: Debug message.
+          :type msg: str
+          )mydelimiter")
+    .def_static("info", [](const std::string& msg) { Log::info(msg); }, py::arg("msg"),
+          R"mydelimiter(
+          Messages that provide a record of the normal operation, about 
+          the application's state, progress, or important events.
+          Reports normal start, end and key steps in an operation. The operation is
+          performed nominally.
+
+          :param msg: Info message.
+          :type msg: str
+          )mydelimiter")
+    .def_static("notice", [](const std::string& msg) { Log::notice(msg); }, py::arg("msg"),
+          R"mydelimiter(
+          Applies to normal but significant conditions that may require monitoring,
+          like unusual or normal fallback events.
+          Reports specific paths in an operation. The operation can still be
+          performed normally.
+
+          :param msg: Notice message.
+          :type msg: str
+          )mydelimiter")
+    .def_static("warn", [](const std::string& msg) { Log::warn(msg); }, py::arg("msg"),
+          R"mydelimiter(
+          Indicates potential issues or situations that may lead to errors but do
+          not necessarily cause immediate problems.
+          Some specific steps of the operation could not be performed, but it can
+          still provide an exploitable result.
+
+          :param msg: Warning message.
+          :type msg: str
+          )mydelimiter")
+    .def_static("error",[](const std::string& msg) { Log::error(msg); }, py::arg("msg"),
+          R"mydelimiter(
+          Signifies a problem or unexpected condition that the application can 
+          recover from, but attention is needed to prevent further issues.
+          The operation could not be performed, but it does not prevent potential
+          further operations.
+
+          :param msg: Error message.
+          :type msg: str
+          )mydelimiter")
+    .def_static("fatal", [](const std::string& msg) { Log::fatal(msg); }, py::arg("msg"),
+          R"mydelimiter(
+          Represents a critical error or condition that leads to the termination of
+          the application, indicating a severe and unrecoverable problem.
+          The operation could not be performed and any further operation is
+          impossible.
+
+          :param msg: Fatal message.
+          :type msg: str
+          )mydelimiter")
+    .def_static("setConsoleLevel", &Log::setConsoleLevel, py::arg("level"),
+          R"mydelimiter(
+          Set the minimum log level displayed in the console.
+
+          :param level: Log level.
+          :type level: Level
+          )mydelimiter")
+    .def_static("setFileLevel", &Log::setFileLevel, py::arg("level"),
+          R"mydelimiter(
+          Set the minimum log level saved in the log file.
+
+          :param level: Log level.
+          :type level: Level
+          )mydelimiter")
+    .def_static("setFileName", &Log::setFileName, py::arg("fileName"),
+          R"mydelimiter(
+          Set the log file name.
+          Closes the current log file and opens a new one with the given name.
+          If the name is empty, stops logging into a file.
+
+          :param fileName: Log file name.
+          :type fileName: str
+          )mydelimiter");
+}
+
+}  // namespace Aidge
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index 1911da228c83d66117a2591adf47dc07cd8dc674..1439391b2e22fe0bea3b5a7692941afc67bc1c6b 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -25,14 +25,18 @@ Aidge::OperatorImpl::OperatorImpl(const Operator& op):
 }
 
 Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
-    assert(mOp.getRawInput(inputIdx) && "requires valid input");
+    AIDGE_ASSERT(mOp.getRawInput(inputIdx),
+        "a valid input is required at index {} for operator type {}",
+        inputIdx, mOp.type());
 
     // Requires the whole tensor by default
     return std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->size();
 }
 
 Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const {
-    assert(mOp.getRawInput(inputIdx) && "requires valid input");
+    AIDGE_ASSERT(mOp.getRawInput(inputIdx),
+        "a valid input is required at index {} for operator type {}",
+        inputIdx, mOp.type());
 
     // Protect the whole tensor by default
     return std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx))->size();
@@ -40,19 +44,25 @@ Aidge::NbElts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx)
 
 Aidge::NbElts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outputIdx,
                                                          const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
-    assert(mOp.getRawOutput(outputIdx) && "requires valid output");
+    AIDGE_ASSERT(mOp.getRawOutput(outputIdx),
+        "a valid output is required at index {} for operator type {}",
+        outputIdx, mOp.type());
 
     // Requires the whole tensor by default, regardless of available data on inputs
     return std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx))->size();
 }
 
 Aidge::NbElts_t Aidge::OperatorImpl::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
-    assert(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size());
+    AIDGE_ASSERT(static_cast<std::size_t>(inputIdx) < mNbConsumedData.size(),
+        "input index ({}) is out of bound ({}) for operator type {}",
+        inputIdx, mNbConsumedData.size(), mOp.type());
     return mNbConsumedData[static_cast<std::size_t>(inputIdx)];
 }
 
 Aidge::NbElts_t Aidge::OperatorImpl::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
-    assert(static_cast<std::size_t>(outputIdx) < mNbProducedData.size());
+    AIDGE_ASSERT(static_cast<std::size_t>(outputIdx) < mNbProducedData.size(),
+        "output index ({}) is out of bound ({}) for operator type {}",
+        outputIdx, mNbProducedData.size(), mOp.type());
     return mNbProducedData[static_cast<std::size_t>(outputIdx)];
 }
 
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 3681ac533cab36d68e5243fe0486b7d0febca694..005a7e679da5941d0995204b6c2a28a01ce376b4 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -328,19 +328,18 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
 }
 
 void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims) {
-    std::set<NodePtr> startNodes = inputNodes();
-
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children informations
     if (!dims.empty()){
-      AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of dimensions and graph inputs");
+      AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of given dimensions ({}) and graph inputs ({})", dims.size(), mInputNodes.size());
       for (std::size_t i = 0; i < dims.size(); ++i){
         auto tensor = std::make_shared<Tensor>(dims[i]);
         mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
       }
     }
-      
+
+    // Ensure every node in the graph is correctly connected
     for (std::shared_ptr<Node> nodePtr : getNodes()) {
         for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
             // assess if the input was not already set and is a Tensor then link it to parent output
@@ -352,7 +351,7 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                         nodePtr->getOperator()->associateInput(i, inputI.first->getOperator()->getRawOutput(inputI.second));
                     }
                     else {
-                        AIDGE_ASSERT(false, "Non-tensor entries not handled yet.\n");
+                        AIDGE_ASSERT(false, "Non-tensor entries not handled yet, for node {} (of type {}).", nodePtr->name(), nodePtr->type());
                     }
                 }
             } else {
@@ -362,54 +361,37 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
             }
 
         }
-
-        if (nodePtr->type() == Producer_Op::Type) {
-          startNodes.insert(nodePtr);
-        }
     }
-    // Compute dimensions of every node
-    _forwardDims(startNodes);
 
-}
-
-void Aidge::GraphView::_forwardDims(std::set<std::shared_ptr<Node>> listNodes) {
-    // TODO: support multi-inputs/outputs
-    std::set<std::shared_ptr<Node>> nextList = std::set<std::shared_ptr<Node>>();
-    for (std::shared_ptr<Node> nodePtr : listNodes) {
-        if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) {
-            const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator());
-            if (!op->outputDimsForwarded()) {
-                op->computeOutputDims();
-            }
-            if (!op->outputDimsForwarded()) { // try to compute output dimensions again later
-                nextList.insert(nodePtr);
-            } else { // compute output dimensions of children
-                std::set<std::shared_ptr<Node>> children = nodePtr->getChildren();
-                for (auto child : children) {
-                  const auto childOp = std::static_pointer_cast<OperatorTensor>(child->getOperator());
-                  if (!childOp->outputDimsForwarded()) {
-                    nextList.insert(child);
-                  }
-                }
-            }
-        }
-    }
-    if (nextList.empty()) {
-        for (std::shared_ptr<Node> nodePtr : getNodes()) {
+    // Compute dimensions of every node
+    std::set<std::shared_ptr<Node>> listNodes = getNodes();
+    do {
+        std::set<std::shared_ptr<Node>> nextList;
+        for (std::shared_ptr<Node> nodePtr : listNodes) {
             if (nodePtr->getOperator()->operatorType() == OperatorType::Tensor) {
-                if (!std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator())->outputDimsForwarded()) {
-                    nextList.insert(nodePtr);
-                }
+              const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator());
+              // Recompute every time, even if it was already computed in a
+              // previous call of forwardDims(), as the graph may have changed!
+              op->computeOutputDims();
+              if (!op->outputDimsForwarded()) {
+                  nextList.insert(nodePtr);
+              }
             }
         }
-    }
 
-    // Internal check to make sure we won't enter in an infinite loop!
-    AIDGE_ASSERT(nextList != listNodes, "Unable to forward dimensions (circular dependency and/or wrong dimensions?)");
+        // Internal check to make sure we won't enter in an infinite loop!
+        if (nextList == listNodes) {
+            // We are stuck!
+            std::vector<std::string> nodesName;
+            std::transform(nextList.begin(), nextList.end(),
+                std::back_inserter(nodesName),
+                [](auto val){ return val->name() + " (" + val->type() + ")"; });
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Unable to forward dimensions (circular dependency and/or wrong dimensions?); could not compute output dims for nodes {}.", nodesName);
+        }
 
-    if (!nextList.empty()) {
-        _forwardDims(nextList);
+        listNodes.swap(nextList);
     }
+    while (!listNodes.empty());
 }
 
 void Aidge::GraphView::setBackend(const std::string &backend, DeviceIdx_t device) {
@@ -458,7 +440,7 @@ Aidge::GraphView::outputs(const std::string& nodeName) const {
 
 void Aidge::GraphView::setInputId(Aidge::IOIndex_t /*inID*/,
                                Aidge::IOIndex_t /*newNodeOutID*/) {
-  fmt::print("Not implemented yet.\n");
+  AIDGE_THROW_OR_ABORT(std::runtime_error, "Not implemented yet.");
 }
 
 void Aidge::GraphView::add(std::shared_ptr<Node> node, bool includeLearnableParam) {
@@ -714,10 +696,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getParents() const {
 
 std::vector<std::shared_ptr<Aidge::Node>> Aidge::GraphView::getParents(const std::string nodeName) const {
   std::map<std::string, std::shared_ptr<Node>>::const_iterator it = mNodeRegistry.find(nodeName);
-  if (it == mNodeRegistry.end()) {
-    fmt::print("No such node a {} in {} graph.\n", nodeName, name());
-    exit(-1);
-  }
+  AIDGE_ASSERT(it != mNodeRegistry.end(), "No node named {} in graph {}.", nodeName, name());
   return (it->second)->getParents();
 }
 
@@ -743,20 +722,15 @@ std::vector<std::vector<std::shared_ptr<Aidge::Node>>>
 Aidge::GraphView::getChildren(const std::string nodeName) const {
   std::map<std::string, std::shared_ptr<Node>>::const_iterator it =
       mNodeRegistry.find(nodeName);
-  if (it == mNodeRegistry.end()) {
-    fmt::print("No such node a {} in {} graph.\n", nodeName, name());
-    exit(-1);
-  }
+  AIDGE_ASSERT(it != mNodeRegistry.end(), "No node named {} in graph {}.", nodeName, name());
   return (it->second)->getOrderedChildren();
 }
 
 std::set<std::shared_ptr<Aidge::Node>>
 Aidge::GraphView::getChildren(const std::shared_ptr<Node> otherNode) const {
   std::set<std::shared_ptr<Node>>::const_iterator it = mNodes.find(otherNode);
-  if (it == mNodes.end()) {
-    fmt::print("No such node in graph.\n");
-    exit(-1);
-  }
+  AIDGE_ASSERT(it != mNodes.end(), "The node {} (of type {}) is not in graph {}.",
+    (otherNode) ? otherNode->name() : "#nullptr", (otherNode) ? otherNode->type() : "", name());
   return (*it)->getChildren();
 }
 
@@ -768,7 +742,7 @@ Aidge::GraphView::getNode(const std::string& nodeName) const {
   if (it != mNodeRegistry.cend()) {
     return it->second;
   } else {
-    fmt::print("No Node named {} in the current GraphView.\n", nodeName);
+    Log::warn("No Node named {} in the current GraphView {}.", nodeName, name());
     return nullptr;
   }
 }
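
Review note: forwardDims() is now an iterative fixed point instead of a recursion. Each pass recomputes the output dims of every pending node, keeps the nodes that still fail for the next pass, and aborts when a pass makes no progress (circular dependency or wrong dimensions). The same control flow, stripped of aidge specifics (Node here is a hypothetical stand-in for the real operator nodes):

```cpp
#include <set>
#include <stdexcept>

// Minimal stand-in: a node's dims become known once all parents' dims are known.
struct Node {
    std::set<Node*> parents;
    bool dimsReady = false;
    void tryComputeDims() {
        bool allKnown = true;
        for (Node* p : parents) allKnown = allKnown && p->dimsReady;
        dimsReady = allKnown;
    }
};

// Same structure as the rewritten GraphView::forwardDims().
void forwardDimsSketch(std::set<Node*> pending) {
    while (!pending.empty()) {
        std::set<Node*> next;
        for (Node* n : pending) {
            n->tryComputeDims();               // recompute on every pass
            if (!n->dimsReady) next.insert(n); // retry later
        }
        if (next == pending)                   // no progress: we are stuck
            throw std::runtime_error("unable to forward dimensions");
        pending.swap(next);
    }
}
```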
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index 5d210144e2faa122416186734c52b67f1a0f8281..14e166402039230a283ce617e4997c9ad099eed9 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -169,7 +169,9 @@ Aidge::IOIndex_t Aidge::Node::nbValidOutputs() const {
 }
 
 void Aidge::Node::setInputId(const IOIndex_t inId, const IOIndex_t newNodeoutId) {
-    assert(inId != gk_IODefaultIndex && (inId < nbInputs()) && "Must be a valid index");
+    AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(),
+        "Input index ({}) is out of bound ({}) for node {} (of type {})",
+        inId, nbInputs(), name(), type());
     if (mIdOutParents[inId] != gk_IODefaultIndex) {
         fmt::print("Warning: filling a Tensor already attributed\n");
         auto originalParent = input(inId);
@@ -194,7 +196,7 @@ void Aidge::Node::addChildOp(std::shared_ptr<Node> otherNode, const IOIndex_t ou
         "Output index (#{}) of the node {} (of type {}) is out of bound (it has {} outputs), when trying to add the child node {} (of type {})",
         outId, name(), type(), nbOutputs(), otherNode->name(), otherNode->type());
     if (otherNode->input(otherInId).second != gk_IODefaultIndex) {
-        fmt::print("Warning, the {}-th Parent of the child node already existed.\n", otherInId);
+        Log::notice("Notice: the {}-th Parent of the child node {} (of type {}) already existed", otherInId, otherNode->name(), otherNode->type());
     }
     // manage tensors and potential previous parent
     otherNode->setInputId(otherInId, outId);
@@ -239,23 +241,29 @@ void Aidge::Node::addChild(std::shared_ptr<GraphView> otherView, const IOIndex_t
 
 void Aidge::Node::addParent(const std::shared_ptr<Node> other_node, const IOIndex_t inId) {
     if (getParent(inId) != nullptr) {
-        fmt::print("Warning, you're replacing a Parent.\n");
+        Log::notice("Notice: you are replacing an existing parent for node {} (of type {})", name(), type());
     }
-    assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Input index out of bound.");
+    AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(),
+        "Input index ({}) is out of bound ({}) for node {} (of type {})",
+        inId, nbInputs(), name(), type());
     mParents[inId] = other_node;
 }
 
 std::vector<std::shared_ptr<Aidge::Node>> Aidge::Node::getParents() const { return mParents; }
 
 std::shared_ptr<Aidge::Node> Aidge::Node::popParent(const IOIndex_t inId) {
-    assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Input index out of bound.");
+    AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(),
+        "Input index ({}) is out of bound ({}) for node {} (of type {})",
+        inId, nbInputs(), name(), type());
     std::shared_ptr<Node> val = mParents[inId];
     removeParent(inId);
     return val;
 }
 
 bool Aidge::Node::removeParent(const IOIndex_t inId) {
-    assert((inId != gk_IODefaultIndex) && (inId < nbInputs()) && "Parent index out of bound.");
+    AIDGE_ASSERT(inId != gk_IODefaultIndex && inId < nbInputs(),
+        "Input index ({}) is out of bound ({}) for node {} (of type {})",
+        inId, nbInputs(), name(), type());
     if (mParents[inId]) {
         mParents[inId] = nullptr;
         mIdOutParents[inId] = gk_IODefaultIndex;
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index 289b2be90735d848e5083090d2ae4319a7490fde..e4213cad80ebdc177649b0c25e4fc49222993211 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -75,4 +75,7 @@ void Aidge::Operator::forward() {
     runHooks();
 }
 
-void Aidge::Operator::backward() { mImpl->backward(); }
+void Aidge::Operator::backward() {
+    AIDGE_ASSERT(mImpl != nullptr, "backward(): an implementation is required for {}!", type());
+    mImpl->backward();
+}
diff --git a/src/recipes/FuseMulAdd.cpp b/src/recipes/FuseMulAdd.cpp
index f408959a13d007853c24e30c1ef683648cf9c200..b57c1c3fc5e4b12dbd0004472a864ddaa864116e 100644
--- a/src/recipes/FuseMulAdd.cpp
+++ b/src/recipes/FuseMulAdd.cpp
@@ -64,7 +64,7 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
     {
         // If both inputs are producers, there is an ambiguity, but both options
         // result in a correct solution.
-        fmt::print("Warning: both MatMul inputs are Producers, assume data at input#0 and weights at input#1.\n");
+        Log::notice("Notice: both MatMul inputs are Producers, assume data at input#0 and weights at input#1.");
         weight = matmulNode->getParent(1)->cloneSharedOperators();
     }
     AIDGE_ASSERT(weight != nullptr, "Could not deduce weight input for MatMul operator.");
diff --git a/src/utils/Log.cpp b/src/utils/Log.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7649809339f4ebf716a7287f5744fb94a5b67ce2
--- /dev/null
+++ b/src/utils/Log.cpp
@@ -0,0 +1,59 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/utils/Log.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+#include <fmt/color.h>
+#include <fmt/chrono.h>
+
+Aidge::Log::Level Aidge::Log::mConsoleLevel = Info;
+Aidge::Log::Level Aidge::Log::mFileLevel = Debug;
+std::string Aidge::Log::mFileName = "aidge.log";
+std::unique_ptr<FILE, decltype(&std::fclose)> Aidge::Log::mFile {nullptr, nullptr};
+
+void Aidge::Log::log(Level level, const std::string& msg) {
+    if (level >= mConsoleLevel) {
+        // Apply the log level style only for the console.
+        // Styles already applied to msg with fmt are also kept in
+        // the log file.
+        const auto modifier
+            = (level == Debug) ? fmt::fg(fmt::color::gray)
+            : (level == Notice) ? fmt::fg(fmt::color::light_yellow)
+            : (level == Warn) ? fmt::fg(fmt::color::orange)
+            : (level == Error) ? fmt::fg(fmt::color::red)
+            : (level == Fatal) ? fmt::bg(fmt::color::red)
+            : fmt::text_style();
+
+        fmt::println("{}", fmt::styled(msg, modifier));
+    }
+
+    if (level >= mFileLevel && !mFileName.empty()) {
+        if (!mFile) {
+            initFile(mFileName);
+        }
+
+        fmt::println(mFile.get(), "{}", msg);
+    }
+}
+
+void Aidge::Log::initFile(const std::string& fileName) {
+    mFile = std::unique_ptr<FILE, decltype(&std::fclose)>(std::fopen(fileName.c_str(), "a"), &std::fclose);
+
+    if (!mFile) {
+        mFileName.clear(); // prevents AIDGE_THROW_OR_ABORT() from trying to log into the file
+        AIDGE_THROW_OR_ABORT(std::runtime_error,
+            "Could not create log file: {}", fileName);
+    }
+
+    const std::time_t t = std::time(nullptr);
+    fmt::println(mFile.get(), "###### {:%Y-%m-%d %H:%M:%S} ######", fmt::localtime(t));
+}
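
Review note on the file-logging call above: with fmt, runtime text has to go through a "{}" format string; passing msg directly would make fmt re-parse any braces the message contains (and does not compile at all where fmt enforces compile-time format checks). A tiny illustration:

```cpp
#include <string>

#include <fmt/core.h>

int main() {
    const std::string msg = "tensor dims {2, 3} unchanged";
    // fmt::print(msg);    // wrong: msg would be parsed as a format string
    fmt::print("{}", msg); // right: msg is emitted verbatim
    return 0;
}
```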
diff --git a/unit_tests/utils/Test_Log.cpp b/unit_tests/utils/Test_Log.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3d8e672b84f5055a12185c3684c34bd888f0545b
--- /dev/null
+++ b/unit_tests/utils/Test_Log.cpp
@@ -0,0 +1,31 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include "aidge/utils/Log.hpp"
+
+#include <fmt/color.h>
+
+using namespace Aidge;
+
+TEST_CASE("[core/log] Log") {
+    SECTION("TestLog") {
+        Log::setConsoleLevel(Log::Debug);
+        Log::debug("debug");
+        Log::debug("{}", fmt::styled("green debug", fmt::fg(fmt::color::green)));
+        Log::info("info");
+        Log::notice("notice");
+        Log::warn("warn");
+        Log::error("error");
+        Log::fatal("fatal");
+    }
+}