diff --git a/include/aidge/backend/OperatorImpl.hpp b/include/aidge/backend/OperatorImpl.hpp
index 4af7da64ebca3c02eb9aabca1f2dad88fd8b9829..d35ff36d3d3c21fec7c4ecb67245ed270fafd801 100644
--- a/include/aidge/backend/OperatorImpl.hpp
+++ b/include/aidge/backend/OperatorImpl.hpp
@@ -24,11 +24,11 @@
 
 namespace Aidge {
 class Node;
-class Operator;
+class AbsOperator;
 
 /**
  * @brief ImplSpec stores the requirements or the specifications of an implementation.
- * 
+ *
  */
 struct ImplSpec {
     struct IOSpec {
@@ -76,23 +76,23 @@ inline bool operator<(const ImplSpec& lhs, const ImplSpec& rhs) {
 /**
  * @brief Impl stores the details of a specific implementation.
  * It is associated to a ImplSpec in a registry.
- * 
+ *
  */
 template <class FwdFunc, class BwdFunc>
 struct Impl {
-    Impl(std::function<std::unique_ptr<ProdConso>(const Operator&)> prodConso_,
+    Impl(std::function<std::unique_ptr<ProdConso>(const AbsOperator&)> prodConso_,
       std::function<FwdFunc> forward_,
       std::function<BwdFunc> backward_ = nullptr):
         prodConso(prodConso_), forward(forward_), backward(backward_) {}
 
-    std::function<std::unique_ptr<ProdConso>(const Operator&)> prodConso;
+    std::function<std::unique_ptr<ProdConso>(const AbsOperator&)> prodConso;
     std::function<FwdFunc> forward;
     std::function<BwdFunc> backward;
 };
 
 class OperatorImpl {
 public:
-    OperatorImpl(const Operator& op, const std::string& backend = "");
+    OperatorImpl(const AbsOperator& op, const std::string& backend = "");
     virtual void forward();
     virtual void backward();
     virtual std::shared_ptr<ProdConso> prodConso();
@@ -101,14 +101,14 @@ public:
         return mBackend;
     }
 
-    const Operator& getOperator() const noexcept {
+    const AbsOperator& getOperator() const noexcept {
         return mOp;
     }
 
     /**
      * @brief Get the operator required implementation specification, according
      * to the current operator configuration.
-     * 
+     *
      */
     ImplSpec getRequiredSpec() const;
 
@@ -116,15 +116,15 @@ public:
      * @brief Get the best implementation that matches \p requiredSpecs.
      * If no implementation matches \p requiredSpecs, \p requiredSpecs is
      * returned.
-     * 
+     *
      */
     ImplSpec getBestMatch(const ImplSpec& requiredSpecs) const;
 
     /**
-     * @brief Get an adapted meta operator corresponding to the required 
+     * @brief Get an adapted meta operator corresponding to the required
      * specifications \p requiredSpecs from the implementation specifications
      * \p spec.
-     * 
+     *
      * @param spec Implementation specification
      * @param requiredSpecs Required specifications
      * @return std::shared_ptr<Node> Adapted meta op or nullptr
@@ -132,12 +132,12 @@ public:
     std::shared_ptr<Node> getAdaptation(const ImplSpec& spec, const ImplSpec& requiredSpecs) const;
 
     /**
-     * @brief Get the best adapted meta operator corresponding to the required 
+     * @brief Get the best adapted meta operator corresponding to the required
      * specifications \p requiredSpecs.
      * The best adaptation is the one with the lowest overhead cost.
-     * Currently, it is the one requiring the least number of additionnal 
+     * Currently, it is the one requiring the least number of additional
      * operators to match the available implementations.
-     * 
+     *
      * @param requiredSpecs Required specifications
      * @return std::shared_ptr<Node> Adapted meta op or nullptr
      */
@@ -150,7 +150,7 @@ protected:
     virtual std::set<ImplSpec> getAvailableImplSpecs() const;
     bool checkIOSpec(const ImplSpec::IOSpec& required, const ImplSpec::IOSpec& spec) const;
 
-    const Operator &mOp;
+    const AbsOperator &mOp;
     const std::string mBackend;
     std::shared_ptr<ProdConso> mProdConso;
 };
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 6454ed233c561e386199e4db40ca698ee9edad8a..243a5d2f5db02c249b16d689d47d817f79b27e20 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -143,6 +143,6 @@ static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
 static Registrar<Tensor> registrarTensorImpl_cpu_UInt8(
         {"cpu", DataType::UInt8}, Aidge::TensorImpl_cpu<uint8_t>::create);
 }  // namespace
-}  // namespace Aidge
+} // namespace Aidge
 
 #endif /* AIDGE_CPU_DATA_TENSORIMPL_H_ */
diff --git a/include/aidge/data/DataProvider.hpp b/include/aidge/data/DataProvider.hpp
index 6c19b5355e406454a2e20bc8994d0ab04d53576a..7f9830f4d0ca7a7b838a1f0f96ef530968269807 100644
--- a/include/aidge/data/DataProvider.hpp
+++ b/include/aidge/data/DataProvider.hpp
@@ -31,7 +31,7 @@ class DataProvider {
 private:
     // Dataset providing the data to the dataProvider
     const Database& mDatabase;
-    
+
     // Desired size of the produced batches
     const std::size_t mBatchSize;
 
@@ -50,7 +50,7 @@ private:
     // mNbItems contains the number of items in the database
     std::size_t mNbItems;
     // mBatches contains the call order of each database item
-    std::vector<unsigned int> mBatches; 
+    std::vector<unsigned int> mBatches;
     // mIndex browsing the number of batch
     std::size_t mIndexBatch;
 
@@ -62,7 +62,7 @@ private:
     // Store each modality dimensions, backend and type
     std::vector<std::vector<std::size_t>> mDataDims;
     std::vector<std::string> mDataBackends;
-    std::vector<DataType> mDataTypes; 
+    std::vector<DataType> mDataTypes;
 
 public:
     /**
@@ -81,8 +81,8 @@ public:
 
     /**
      * @brief Get the Number of Batch.
-     * 
-     * @return std::size_t 
+     *
+     * @return std::size_t
      */
     inline std::size_t getNbBatch(){
         return mNbBatch;
@@ -90,8 +90,8 @@ public:
 
     /**
      * @brief Get the current Index Batch.
-     * 
-     * @return std::size_t 
+     *
+     * @return std::size_t
      */
     inline std::size_t getIndexBatch(){
         return mIndexBatch;
@@ -118,7 +118,7 @@ public:
 
     /**
      * @brief End condition of dataProvider for one pass on the database.
-     * 
+     *
      * @return true when all batch were fetched, False otherwise
      */
     inline bool done(){
@@ -129,15 +129,15 @@ public:
     // Functions for python iterator iter and next (definition in pybind.cpp)
     /**
      * @brief __iter__ method for iterator protocol
-     * 
-     * @return DataProvider* 
+     *
+     * @return DataProvider*
      */
     DataProvider* iter();
 
     /**
      * @brief __next__ method for iterator protocol
-     * 
-     * @return std::vector<std::shared_ptr<Aidge::Tensor>> 
+     *
+     * @return std::vector<std::shared_ptr<Aidge::Tensor>>
      */
     std::vector<std::shared_ptr<Aidge::Tensor>> next();
 };
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 58e893ca5d5339d93799415f076dd69d54db69ca..3cd108e2ed114ad681c312ebeb77a222dece30d9 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -891,6 +891,6 @@ private:
         mSize = std::accumulate(mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
     }
 };
-}  // namespace Aidge
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_DATA_TENSOR_H_ */
diff --git a/include/aidge/filler/Filler.hpp b/include/aidge/filler/Filler.hpp
index fe39771b634278909f7eef20068cb941f9922ab8..d06d787909d2df1685cd08a77f22da298975a0d0 100644
--- a/include/aidge/filler/Filler.hpp
+++ b/include/aidge/filler/Filler.hpp
@@ -45,6 +45,6 @@ template <typename T>
 void heFiller(std::shared_ptr<Tensor> tensor, VarianceNorm varianceNorm = VarianceNorm::FanIn,
               T meanNorm = 0.0, T scaling = 1.0);
 
-}  // namespace Aidge
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_FILLER_FILLER_H_ */
diff --git a/include/aidge/graph/Connector.hpp b/include/aidge/graph/Connector.hpp
index 599ca7d6defd729b6e6536dcc95f326d345701d9..87090e2dc63ec83058e4dc4c45da70bf4313b4ca 100644
--- a/include/aidge/graph/Connector.hpp
+++ b/include/aidge/graph/Connector.hpp
@@ -81,6 +81,6 @@ class Connector {
  * @return std::shared_ptr<GraphView>
  */
 std::shared_ptr<GraphView> generateGraph(std::vector<Connector> ctors);
-}  // namespace Aidge
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_GRAPH_CONNECTOR_H_ */
\ No newline at end of file
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index efdb06c4ac6d0e6898d899cc639a88d1da301000..1c2f68dac4420651d8b4b65c99567cf41b70890a 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -595,6 +595,6 @@ private:
  * @return GraphView GraphView containing all nodes with a path to node.
 */
 std::shared_ptr<GraphView> getConnectedGraphView(std::shared_ptr<Node> node);
-}  // namespace Aidge
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_GRAPH_GRAPHVIEW_H_ */
diff --git a/include/aidge/graph/Matching.hpp b/include/aidge/graph/Matching.hpp
index 951aa6b29d73d9055cf9f13c8ddc6313cb506879..e8e21e89d67267f55ac20eac15a4b4e1e72be302 100644
--- a/include/aidge/graph/Matching.hpp
+++ b/include/aidge/graph/Matching.hpp
@@ -141,7 +141,7 @@ public:
 
     /**
      * @brief Same as match() but with a mandatory start node.
-     * 
+     *
      * @param startNode Mandatory start node for the query.
      * @param query The query to search.
      * @return MatchingResult MatchingResult struct, with empty graph if query
@@ -229,6 +229,6 @@ inline bool operator<(const Aidge::SinglePassGraphMatching::MatchingResult& lhs,
     // Matches rootNode are garanteed to be different!
     return lhs.graph->rootNode() < rhs.graph->rootNode();
 }
-}  // namespace Aidge
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_GRAPH_MATCHING_H_ */
diff --git a/include/aidge/graph/Node.hpp b/include/aidge/graph/Node.hpp
index e014b041fdad94f5f17d636a2da92180de59e152..fdde7dda76a96e4808d1f0bdb785cc3a599d9dd1 100644
--- a/include/aidge/graph/Node.hpp
+++ b/include/aidge/graph/Node.hpp
@@ -57,7 +57,7 @@ private:
   std::shared_ptr<DynamicAttributes> mAttrs;
 
   std::set<std::weak_ptr<GraphView>, weakCompare> mViews; /** Set of pointers to GraphView instances including this Node instance. */
-  const std::shared_ptr<Operator> mOperator; // Pointer to the associated Operator
+  const std::shared_ptr<AbsOperator> mOperator; // Pointer to the associated Operator
 
   std::vector<NodePtr> mParents; /** List of parent node for each input (Parent --> Node --> Child) */
   std::vector<std::vector<std::weak_ptr<Node>>> mChildren; /** List of children nodes for each output (Parent --> Node --> Child) */
@@ -75,15 +75,15 @@ public:
    * @param op Operator giving the Node its number of connections.
    * @param attrs Attributes for the Node.
    */
-  Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttributes> attrs);
-  Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs);
+  Node(std::shared_ptr<AbsOperator> op, std::shared_ptr<DynamicAttributes> attrs);
+  Node(std::shared_ptr<AbsOperator> op, const DynamicAttributes& attrs);
 
   /**
    * @brief Construct a new Node object associated with the input Operator.
    * @param op Operator giving the Node its number of connections.
    * @param name (optional) name for the Node.
    */
-  Node(std::shared_ptr<Operator> op, const std::string& name = "");
+  Node(std::shared_ptr<AbsOperator> op, const std::string& name = "");
 
   virtual ~Node();
 
@@ -172,7 +172,7 @@ public:
    * @brief Get the Operator object of the Node.
    * @return std::shared_ptr<Operator>
    */
-  inline std::shared_ptr<Operator> getOperator() const { return (*mOperator)(mAttrs); }
+  inline std::shared_ptr<AbsOperator> getOperator() const { return (*mOperator)(mAttrs); }
 
   ///////////////////////////////////////////////////////
   //        TENSOR MANAGEMENT
@@ -498,7 +498,7 @@ private:
   ///////////////////////////////////////////////////////
 
   // cannot change operator for now
-  // void setOperator(const std::shared_ptr<Operator> op_ptr);
+  // void setOperator(const std::shared_ptr<AbsOperator> op_ptr);
 
   ///////////////////////////////////////////////////////
   //        TENSOR MANAGEMENT
diff --git a/include/aidge/graphRegex/GraphFsmInterpreter.hpp b/include/aidge/graphRegex/GraphFsmInterpreter.hpp
index e2fd43b9e641e8cb4a695e3a3eecf5975610d564..5ac37ae6d3e224d8336398ba7e7522e6747552ab 100644
--- a/include/aidge/graphRegex/GraphFsmInterpreter.hpp
+++ b/include/aidge/graphRegex/GraphFsmInterpreter.hpp
@@ -27,7 +27,7 @@ namespace Aidge {
 
         std::shared_ptr<FsmGraph>  interpret(void);
 
-        
+
 
         private:
 
diff --git a/include/aidge/nodeTester/ConditionalInterpreter.hpp b/include/aidge/nodeTester/ConditionalInterpreter.hpp
index af6a3b920bb9ca389724860d55250d7ef4540677..fc01e9f2acd44b12728707292628f1fd3184e309 100644
--- a/include/aidge/nodeTester/ConditionalInterpreter.hpp
+++ b/include/aidge/nodeTester/ConditionalInterpreter.hpp
@@ -130,7 +130,7 @@ class ConditionalRegisterFunction {
                 errorMessage << "bad Number of argument: get " << args.size() << " need " << sizeof...(ParamsIdx) << "\n";
                 throw std::runtime_error(errorMessage.str());
             }
-    		//we used std::vector< std::shared_ptr<ConditionalData>> as a fifo 
+    		//we used std::vector< std::shared_ptr<ConditionalData>> as a fifo
             std::size_t offset = args.size()-sizeof...(ParamsIdx);
 
     		using FuncTraits = function_traits<decltype(f)>;
diff --git a/include/aidge/operator/Abs.hpp b/include/aidge/operator/Abs.hpp
index f1dc37003fbff9463d041030818ec0534c5ac1fd..4075e8614bc8182a63e4510fffe11b8a4d98aaa9 100644
--- a/include/aidge/operator/Abs.hpp
+++ b/include/aidge/operator/Abs.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 class Abs_Op : public OperatorTensor,
     public Registrable<Abs_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Abs_Op&)>> {
@@ -49,7 +50,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Abs_Op
      */
-    std::shared_ptr<Operator> clone() const override {
+    std::shared_ptr<AbsOperator> clone() const override {
         return std::make_shared<Abs_Op>(*this);
     }
 
@@ -67,6 +68,8 @@ public:
 inline std::shared_ptr<Node> Abs(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<Abs_Op>(), name);
 }
-}
+
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_ABS_H_ */
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index f96996079b9e89f80c78b8e409830369480705a8..61f64a18af0e4864e76ae0bf15b2835bb7cecc1d 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
+namespace Operator {
 
 class Add_Op : public OperatorTensor,
     public Registrable<Add_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Add_Op&)>> {
@@ -41,7 +42,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Add_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     // Data operator[](const char* inputName) override final {
     //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
@@ -67,6 +68,8 @@ public:
 };
 
 std::shared_ptr<Node> Add(const IOIndex_t nbIn, const std::string& name = "");
-}
+
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_ADD_H_ */
diff --git a/include/aidge/operator/And.hpp b/include/aidge/operator/And.hpp
index e4f04e2fa3ec2a4a01f023b9ab203e6b2ab36e76..8f55b6f5ce0ca8fbc56a10e8231e5004e35f831e 100644
--- a/include/aidge/operator/And.hpp
+++ b/include/aidge/operator/And.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 /**
  * @brief Tensor element-wise logical and operation.
@@ -57,7 +58,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::And_Op
      */
-    std::shared_ptr<Operator> clone() const override {
+    std::shared_ptr<AbsOperator> clone() const override {
         return std::make_shared<And_Op>(*this);
     }
 
@@ -77,6 +78,8 @@ public:
 inline std::shared_ptr<Node> And(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<And_Op>(), name);
 }
+
+} // namespace Operator
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_AND_H_ */
diff --git a/include/aidge/operator/ArgMax.hpp b/include/aidge/operator/ArgMax.hpp
index 13f63ce98c526f0c57a363ada4e7f50ccdbfb83b..8e5c861e6797b9beace91f56b3fc0360981502df 100644
--- a/include/aidge/operator/ArgMax.hpp
+++ b/include/aidge/operator/ArgMax.hpp
@@ -26,23 +26,25 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ArgMaxAttr { Axis, KeepDims, SelectLastIndex };
+namespace Operator {
 
 /**
  * @brief This operator has as purpose to reduce given dimension by replacing with the Max value's index.
 */
 class ArgMax_Op : public OperatorTensor,
                 public Registrable<ArgMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ArgMax_Op &)>> {
+public:
+    enum class mAttr { Axis, KeepDims, SelectLastIndex };
 
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ArgMaxAttr,
+    using Attributes_ = StaticAttributes<mAttr,
                                         std::int32_t,
                                         bool,
                                         bool>;
-    template <ArgMaxAttr e>
+    template <mAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -52,17 +54,17 @@ public:
     /**
      * @brief constructor for ArgMax op
      * @param[in] axis around which perform the operation
-     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axis and 
+     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axis and
      * if false we remove the dimension completely
-     * @param[in] select_last_index in case we have many maximum, if true the last index is returned 
-     * if false the first index is returned. 
+     * @param[in] select_last_index in case we have many maximum, if true the last index is returned
+     * if false the first index is returned.
      */
     ArgMax_Op(std::int32_t axis, bool keep_dims, bool select_last_index)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
-            attr<ArgMaxAttr::Axis>(axis),
-            attr<ArgMaxAttr::KeepDims>(keep_dims),
-            attr<ArgMaxAttr::SelectLastIndex>(select_last_index)))
+            attr<mAttr::Axis>(axis),
+            attr<mAttr::KeepDims>(keep_dims),
+            attr<mAttr::SelectLastIndex>(select_last_index)))
     {}
 
     /**
@@ -84,7 +86,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ArgMax_Op
      */
-    std::shared_ptr<Operator> clone() const override {
+    std::shared_ptr<AbsOperator> clone() const override {
         return std::make_shared<ArgMax_Op>(*this);
     }
 
@@ -94,9 +96,9 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::Axis>(); }
-    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::KeepDims>(); }
-    inline bool& selectLastIndex() const noexcept { return mAttributes -> getAttr<ArgMaxAttr::SelectLastIndex>(); }
+    inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<mAttr::Axis>(); }
+    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<mAttr::KeepDims>(); }
+    inline bool& selectLastIndex() const noexcept { return mAttributes -> getAttr<mAttr::SelectLastIndex>(); }
 
 
     static const std::vector<std::string> getInputsName() {
@@ -114,23 +116,24 @@ public:
  * @param axis Dimension over which data max should be computed.
  * @param keep_dims Whether or not reduced dimensions are to be erased.
  * @param select_last_index Whether to select the last index of max elements in case there are many maximums.
- * By default the first max element index is 
+ * By default the first max element index is returned.
  * @param name Name of the Operator.
  * @return std::shared_ptr<Node> Node containing the Operator.
  */
 inline std::shared_ptr<Node> ArgMax(std::int32_t axis=0,
                                     bool keep_dims=true,
                                     bool select_last_index=false,
-                                    const std::string& name = "") {
+                                    const std::string& name = "")
+{
     return std::make_shared<Node>(std::make_shared<ArgMax_Op>(axis, keep_dims, select_last_index), name);
-
 }
 
-}  // namespace Aidge
+} // namespace Operator
+} // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ArgMaxAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
+const char *const EnumStrings<Aidge::Operator::ArgMax_Op::mAttr>::data[] = {"axis", "keep_dims", "select_last_index"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_ARGMAX_H_ */
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 54b40907e8b4127b7b96b95b229440d782149c3d..a96240e380ca97646ae25e293b0cd1d19ffaba28 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -24,20 +24,23 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class AvgPoolingAttr { StrideDims, KernelDims };
+namespace Operator {
+
 
 template <DimIdx_t DIM>
 class AvgPooling_Op : public OperatorTensor,
                 public Registrable<AvgPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const AvgPooling_Op<DIM> &)>> {
+public:
+    enum class mAttr { StrideDims, KernelDims };
 
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<AvgPoolingAttr,
+    using Attributes_ = StaticAttributes<mAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>>;
-    template <AvgPoolingAttr e>
+    template <mAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -50,8 +53,8 @@ public:
                             const std::array<DimSize_t, DIM> &stride_dims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
-                        attr<AvgPoolingAttr::StrideDims>(stride_dims),
-                        attr<AvgPoolingAttr::KernelDims>(kernel_dims)))
+                        attr<mAttr::StrideDims>(stride_dims),
+                        attr<mAttr::KernelDims>(kernel_dims)))
     {}
 
     /**
@@ -64,7 +67,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::AvgPooling_Op
      */
-    std::shared_ptr<Operator> clone() const override final;
+    std::shared_ptr<AbsOperator> clone() const override final;
 
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
@@ -80,8 +83,8 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<AvgPoolingAttr::StrideDims>(); }
-    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<AvgPoolingAttr::KernelDims>(); }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<mAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<mAttr::KernelDims>(); }
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
@@ -108,16 +111,33 @@ inline std::shared_ptr<Node> AvgPooling(
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
-extern template class Aidge::AvgPooling_Op<1>;
-extern template class Aidge::AvgPooling_Op<2>;
-extern template class Aidge::AvgPooling_Op<3>;
-extern template class Aidge::AvgPooling_Op<4>;
+extern template class Aidge::Operator::AvgPooling_Op<1>;
+extern template class Aidge::Operator::AvgPooling_Op<2>;
+extern template class Aidge::Operator::AvgPooling_Op<3>;
+extern template class Aidge::Operator::AvgPooling_Op<4>;
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
+const char *const EnumStrings<Aidge::Operator::AvgPooling_Op<1>::mAttr>::data[] = {
+    "stride_dims",
+    "kernel_dims"
+};
+template <>
+const char *const EnumStrings<Aidge::Operator::AvgPooling_Op<2>::mAttr>::data[] = {
+    "stride_dims",
+    "kernel_dims"
+};
+template <>
+const char *const EnumStrings<Aidge::Operator::AvgPooling_Op<3>::mAttr>::data[] = {
+    "stride_dims",
+    "kernel_dims"
+};
+template <>
+const char *const EnumStrings<Aidge::Operator::AvgPooling_Op<4>::mAttr>::data[] = {
     "stride_dims",
     "kernel_dims"
 };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_AVGPOOLING_H_ */
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index cdac7935f6ded752201c04b2dda6cfb9e06438ec..566893cd4346677242b2cadeac80b31daf6d56bb 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -23,18 +23,21 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
-enum class BatchNormAttr { Epsilon, Momentum };
 
 template <DimIdx_t DIM>
 class BatchNorm_Op : public OperatorTensor,
                 public Registrable<BatchNorm_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const BatchNorm_Op<DIM> &)>> {
+public:
+    enum class mAttr { Epsilon, Momentum };
+
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<BatchNormAttr, float, float>;
-    template <BatchNormAttr e>
+    using Attributes_ = StaticAttributes<mAttr, float, float>;
+    template <mAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -51,8 +54,8 @@ public:
                                 InputCategory::Param},
                             1),
           mAttributes(std::make_shared<Attributes_>(
-            attr<BatchNormAttr::Epsilon>(epsilon),
-            attr<BatchNormAttr::Momentum>(momentum))) {}
+            attr<mAttr::Epsilon>(epsilon),
+            attr<mAttr::Momentum>(momentum))) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -64,7 +67,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::BatchNorm_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     // Data operator[](const char* inputName) override final {
     //     std::shared_ptr<Tensor> in = (strcmp(inputName, "data")) ? mInputs[0] :
@@ -82,8 +85,8 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline float& epsilon() const { return mAttributes->template getAttr<BatchNormAttr::Epsilon>(); }
-    inline float& momentum() const { return mAttributes->template getAttr<BatchNormAttr::Momentum>(); }
+    inline float& epsilon() const { return mAttributes->template getAttr<mAttr::Epsilon>(); }
+    inline float& momentum() const { return mAttributes->template getAttr<mAttr::Momentum>(); }
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input", "scale", "shift", "mean", "variance"};
@@ -105,12 +108,18 @@ std::shared_ptr<Node> BatchNorm(const DimSize_t nbFeatures,
-}  // namespace Aidge
 
-extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<2>(const DimSize_t, const float, const float, const std::string&);
-extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<3>(const DimSize_t, const float, const float, const std::string&);
-extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t, const float, const float, const std::string&);
+} // namespace Operator
+} // namespace Aidge
+
+extern template std::shared_ptr<Aidge::Node> Aidge::Operator::BatchNorm<2>(const DimSize_t, const float, const float, const std::string&);
+extern template std::shared_ptr<Aidge::Node> Aidge::Operator::BatchNorm<3>(const DimSize_t, const float, const float, const std::string&);
+extern template std::shared_ptr<Aidge::Node> Aidge::Operator::BatchNorm<4>(const DimSize_t, const float, const float, const std::string&);
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum" };
+const char *const EnumStrings<Aidge::Operator::BatchNorm_Op<2>::mAttr>::data[] = { "epsilon", "momentum" };
+template <>
+const char *const EnumStrings<Aidge::Operator::BatchNorm_Op<3>::mAttr>::data[] = { "epsilon", "momentum" };
+template <>
+const char *const EnumStrings<Aidge::Operator::BatchNorm_Op<4>::mAttr>::data[] = { "epsilon", "momentum" };
 }
 
 #endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
diff --git a/include/aidge/operator/BitShift.hpp b/include/aidge/operator/BitShift.hpp
index bd14bea76937fbfc42cbafa9636df9b55832fa9d..27c5faea0a09e23982e6d6a72281ff1c8841c39f 100644
--- a/include/aidge/operator/BitShift.hpp
+++ b/include/aidge/operator/BitShift.hpp
@@ -23,9 +23,8 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/StaticAttributes.hpp"
 
-
 namespace Aidge {
-    enum class BitShiftAttr { BitShiftdirection };
+namespace Operator {
 
 /**
  * @brief Tensor BitShift Operator
@@ -33,19 +32,20 @@ namespace Aidge {
 class BitShift_Op : public OperatorTensor,
     public Registrable<BitShift_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const BitShift_Op&)>> {
 public:
+    enum class mAttr { BitShiftdirection };
     enum BitShiftDirection {left,right};
     static const std::string Type;
-private:     
+private:
 
-    using Attributes_ = StaticAttributes<BitShiftAttr,BitShiftDirection>;
-    template <BitShiftAttr e> using attr = typename Attributes_::template attr<e>;
+    using Attributes_ = StaticAttributes<mAttr,BitShiftDirection>;
+    template <mAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 public:
 
-    BitShift_Op(BitShiftDirection direction) 
+    BitShift_Op(BitShiftDirection direction)
     : OperatorTensor(Type, {InputCategory::Data, InputCategory::Data}, 1),
     mAttributes(std::make_shared<Attributes_>(
-                attr<BitShiftAttr::BitShiftdirection>(direction))) 
+                attr<mAttr::BitShiftdirection>(direction)))
                 {}
 
     /**¨PPPP
@@ -67,33 +67,33 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::BitShift_Op
      */
-    std::shared_ptr<Operator> clone() const override {
+    std::shared_ptr<AbsOperator> clone() const override {
         return std::make_shared<BitShift_Op>(*this);
     }
 
     bool forwardDims(bool allowDataDependency = false) override final;
-   
+
     /**
      * @brief Setter to specify which backend to use
-     * 
+     *
      * @return Boolean
      */
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
     std::set<std::string> getAvailableBackends() const override;
-    
+
     /**
      * @brief Getter to retrieve Attributes of the bitshift class
-     * 
+     *
      * @return Attributes
      */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
     /**
      * @brief Retrieve the direction in which the shift should be applied (right or left)
-     * 
-     * @return BitShiftDirection 
+     *
+     * @return BitShiftDirection
      */
-    inline BitShiftDirection& direction() const noexcept { return mAttributes ->template getAttr<BitShiftAttr::BitShiftdirection>(); }
+    inline BitShiftDirection& direction() const noexcept { return mAttributes ->template getAttr<mAttr::BitShiftdirection>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"InputTensor", "ShiftAmount"};
@@ -105,20 +105,22 @@ public:
 
 };
 /**
- * @brief The bitwise shift operator performs an element-wise operation between the input tensor and the shift tensor in 
-    the direction specified by "direction" 
+ * @brief The bitwise shift operator performs an element-wise operation between the input tensor and the shift tensor in
+    the direction specified by "direction"
  * @param[in] direction Direction of the bitshift (Left or Right)
  * @param[in] name Name of the node
- * @return std::shared_ptr<Node> 
+ * @return std::shared_ptr<Node>
  */
-    inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direction, const std::string& name = "") {
-        return std::make_shared<Node>(std::make_shared<BitShift_Op>(direction), name);
-    }
+inline std::shared_ptr<Node> BitShift(const BitShift_Op::BitShiftDirection direction, const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<BitShift_Op>(direction), name);
+}
+
+} // namespace Operator
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::BitShiftAttr>::data[] = {"BitShiftdirection"};
+const char *const EnumStrings<Aidge::Operator::BitShift_Op::mAttr>::data[] = {"BitShiftdirection"};
 
 }
 
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 3fa1bb22a0dd9def11e0621b67cbd8395b5344fa..eb2e22314c2fd886d1583b3a9a9eae01c5ee9ace 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -24,22 +24,26 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
+
 class Cast_OpImpl : public OperatorImpl {
 public:
-    Cast_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Cast_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
     void forward() override;
 };
 
-enum class CastAttr { TargetType };
 
 class Cast_Op : public OperatorTensor,
     public Registrable<Cast_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Cast_Op&)>> {
+public:
+enum class mAttr { TargetType };
+
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<CastAttr, DataType>;
-    template <CastAttr e>
+    using Attributes_ = StaticAttributes<mAttr, DataType>;
+    template <mAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -50,7 +54,7 @@ public:
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
-     * @param op Operator to copy.
+     * @param op AbsOperator to copy.
      */
     Cast_Op(const Cast_Op& op)
         : OperatorTensor(op),
@@ -66,9 +70,9 @@ public:
 
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Cast_Op
+ * @see Operator::Cast_Op
      */
-    std::shared_ptr<Operator> clone() const override {
+    std::shared_ptr<AbsOperator> clone() const override {
         return std::make_shared<Cast_Op>(*this);
     }
 
@@ -76,7 +80,7 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline DataType& targetType() const { return mAttributes->template getAttr<CastAttr::TargetType>(); }
+    inline DataType& targetType() const { return mAttributes->template getAttr<mAttr::TargetType>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -89,11 +93,12 @@ public:
 
 std::shared_ptr<Node> Cast(const DataType targetType, const std::string& name = "");
 
+} // namespace Operator
 } // namespace Aidge
 
 namespace {
 template <>
-const char* const EnumStrings<Aidge::CastAttr>::data[] = { "target_type" };
+const char* const EnumStrings<Aidge::Operator::Cast_Op::mAttr>::data[] = { "target_type" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 98835dd2a4b02e51b50636ee8606382a50ba7b89..a97cef5486cbed38f1c9a08d334e284b777f89a6 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -26,24 +26,28 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
+
 class Concat_OpImpl : public OperatorImpl {
 public:
-    Concat_OpImpl(const Operator& op, const std::string& backend = "")
+    Concat_OpImpl(const AbsOperator& op, const std::string& backend = "")
         : OperatorImpl(op, backend)
     {}
     void forward() override;
 };
 
-enum class ConcatAttr { Axis };
 
 class Concat_Op : public OperatorTensor,
     public Registrable<Concat_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Concat_Op&)>> {
+public:
+enum class mAttr { Axis };
+
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ConcatAttr, std::int32_t>;
-    template <ConcatAttr e>
+    using Attributes_ = StaticAttributes<mAttr, std::int32_t>;
+    template <mAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -62,7 +66,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Concat_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -70,7 +74,7 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::int32_t& axis() const { return mAttributes->template getAttr<ConcatAttr::Axis>(); }
+    inline std::int32_t& axis() const { return mAttributes->template getAttr<mAttr::Axis>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input_0", "data_input_n"};
@@ -81,11 +85,13 @@ public:
 };
 
 std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axis = 0, const std::string& name = "");
-}
+
+} // namespace Operator
+} // namespace Aidge
 
 namespace {
     template <>
-    const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
+    const char* const EnumStrings<Aidge::Operator::Concat_Op::mAttr>::data[] = {
         "axis"
     };
 }
diff --git a/include/aidge/operator/ConstantOfShape.hpp b/include/aidge/operator/ConstantOfShape.hpp
index 18e626544606fd150b2843d2367aa8858669c2ba..1f48ee60506ba5fff5c91f4cb88adb01f5c0252b 100644
--- a/include/aidge/operator/ConstantOfShape.hpp
+++ b/include/aidge/operator/ConstantOfShape.hpp
@@ -31,6 +31,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 enum class ConstantOfShapeAttr {
   /**
@@ -63,7 +64,7 @@ private:
 public:
   /**
    * @brief constructor for ConstantOfShape_op
-   * @param[in] value : a scalar tensor which holds the value that will 
+   * @param[in] value : a scalar tensor which holds the value that will
    * fill the output tensor
    */
   ConstantOfShape_Op(const Tensor &value = Tensor(0.f))
@@ -90,7 +91,7 @@ public:
    * @brief Clone the operator using its copy-constructor.
    * @see Operator::MatMul_Op
    */
-  std::shared_ptr<Operator> clone() const override final {
+  std::shared_ptr<AbsOperator> clone() const override final {
     return std::make_shared<ConstantOfShape_Op>(*this);
   }
 
@@ -125,6 +126,8 @@ inline std::shared_ptr<Node> ConstantOfShape(const Tensor value = Tensor(0.f),
   return std::make_shared<Node>(std::make_shared<ConstantOfShape_Op>(value),
                                 name);
 }
+
+} // namespace Operator
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index cd1a57dd9ac52d2f5cdff3b5ed54c6dd2aeeed34..90d356af7b46a6cc4d352c98f3e5bcb579eb63c8 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -30,17 +30,21 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvAttr { StrideDims, DilationDims, KernelDims };
+namespace Operator {
+
 
 template <DimIdx_t DIM>
 class Conv_Op : public OperatorTensor,
                 public Registrable<Conv_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Conv_Op<DIM> &)>> {
 
+public:
+    enum class mAttr { StrideDims, DilationDims, KernelDims };
+
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ConvAttr,
+    using Attributes_ = StaticAttributes<mAttr,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>>;
@@ -71,7 +75,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Conv_Op
      */
-    std::shared_ptr<Operator> clone() const override {
+    std::shared_ptr<AbsOperator> clone() const override {
         return std::make_shared<Conv_Op<DIM>>(*this);
     }
 
@@ -114,7 +118,7 @@ public:
     }
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<mAttr::StrideDims>(); }
-    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvAttr::DilationDims>(); }
-    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvAttr::KernelDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<mAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<mAttr::KernelDims>(); }
 
@@ -162,14 +166,22 @@ inline std::shared_ptr<Node> Conv(
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Conv, not supported");
     return Conv(inChannels, outChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias);
 }
-}  // namespace Aidge
 
-extern template class Aidge::Conv_Op<1>;
-extern template class Aidge::Conv_Op<2>;
+} // namespace Operator
+} // namespace Aidge
+
+extern template class Aidge::Operator::Conv_Op<1>;
+extern template class Aidge::Operator::Conv_Op<2>;
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
+const char *const EnumStrings<Aidge::Operator::Conv_Op<1>::mAttr>::data[] = {
+    "stride_dims",
+    "dilation_dims",
+    "kernel_dims"
+};
+template <>
+const char *const EnumStrings<Aidge::Operator::Conv_Op<2>::mAttr>::data[] = {
     "stride_dims",
     "dilation_dims",
     "kernel_dims"
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index f0a55a299094add58bd3938e9cca9bbb48e21da8..8f03d47559fda786e06b0c03194c56ecce3697cd 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -29,20 +29,24 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ConvDepthWiseAttr { StrideDims, DilationDims, KernelDims };
+namespace Operator {
+
 
 template <DimIdx_t DIM>
 class ConvDepthWise_Op : public OperatorTensor,
                 public Registrable<ConvDepthWise_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const ConvDepthWise_Op<DIM> &)>> {
+public:
+enum class mAttr { StrideDims, DilationDims, KernelDims };
+
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ConvDepthWiseAttr,
+    using Attributes_ = StaticAttributes<mAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>>;
-    template <ConvDepthWiseAttr e>
+    template <mAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -55,9 +59,9 @@ public:
                                const std::array<DimSize_t, DIM> &dilation_dims = create_array<DimSize_t,DIM>(1))
         : OperatorTensor(Type, {InputCategory::Data, InputCategory::Param, InputCategory::OptionalParam}, 1),
           mAttributes(std::make_shared<Attributes_>(
-            attr<ConvDepthWiseAttr::StrideDims>(stride_dims),
-            attr<ConvDepthWiseAttr::DilationDims>(dilation_dims),
-            attr<ConvDepthWiseAttr::KernelDims>(kernel_dims)))
+            attr<mAttr::StrideDims>(stride_dims),
+            attr<mAttr::DilationDims>(dilation_dims),
+            attr<mAttr::KernelDims>(kernel_dims)))
     {}
 
     /**
@@ -70,7 +74,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ConvDepthWise_Op
      */
-    std::shared_ptr<Operator> clone() const override {
+    std::shared_ptr<AbsOperator> clone() const override {
         return std::make_shared<ConvDepthWise_Op<DIM>>(*this);
     }
 
@@ -93,9 +97,9 @@ public:
     }
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::StrideDims>(); }
-    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::DilationDims>(); }
-    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<ConvDepthWiseAttr::KernelDims>(); }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<mAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<mAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<mAttr::KernelDims>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "weight", "bias"};
@@ -125,14 +129,19 @@ inline std::shared_ptr<Node> ConvDepthWise(
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ConvDepthWise, not supported");
     return ConvDepthWise(nbChannels, to_array(kernelDims), name, strideDims, dilationDims, noBias);
 }
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
-extern template class Aidge::ConvDepthWise_Op<1>;
-extern template class Aidge::ConvDepthWise_Op<2>;
+extern template class Aidge::Operator::ConvDepthWise_Op<1>;
+extern template class Aidge::Operator::ConvDepthWise_Op<2>;
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"stride_dims", "dilation_dims",
+const char *const EnumStrings<Aidge::Operator::ConvDepthWise_Op<1>::mAttr>::data[] = {"stride_dims", "dilation_dims",
+                                                          "kernel_dims"};
+template <>
+const char *const EnumStrings<Aidge::Operator::ConvDepthWise_Op<2>::mAttr>::data[] = {"stride_dims", "dilation_dims",
                                                           "kernel_dims"};
 }
 
diff --git a/include/aidge/operator/DepthToSpace.hpp b/include/aidge/operator/DepthToSpace.hpp
index 856cd0e85d1abb47d3c163115bef6cbfb59bb66f..bf3f155a954c3e4f270d2edc0d7da1807ceabb14 100644
--- a/include/aidge/operator/DepthToSpace.hpp
+++ b/include/aidge/operator/DepthToSpace.hpp
@@ -23,26 +23,30 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
+
 class DepthToSpace_OpImpl : public OperatorImpl {
 public:
-    DepthToSpace_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    DepthToSpace_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
     void forward() override;
 };
 
-enum class DepthToSpaceAttr { BlockSize, Mode };
 
 
 class DepthToSpace_Op : public OperatorTensor,
                 public Registrable<DepthToSpace_Op,
                     std::string,
                     std::function<std::shared_ptr<OperatorImpl>(const DepthToSpace_Op &)>> {
+public:
+enum class mAttr { BlockSize, Mode };
+
 public:
     static const std::string Type;
     enum class Mode { DCR, CRD };
 
 private:
-    using Attributes_ = StaticAttributes<DepthToSpaceAttr, std::uint32_t, Mode>;
-    template <DepthToSpaceAttr e>
+    using Attributes_ = StaticAttributes<mAttr, std::uint32_t, Mode>;
+    template <mAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -63,7 +67,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::DepthToSpace_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -71,8 +75,8 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::uint32_t& blockSize() const { return mAttributes->template getAttr<DepthToSpaceAttr::BlockSize>(); }
-    inline Mode& mode() const { return mAttributes->template getAttr<DepthToSpaceAttr::Mode>(); }
+    inline std::uint32_t& blockSize() const { return mAttributes->template getAttr<mAttr::BlockSize>(); }
+    inline Mode& mode() const { return mAttributes->template getAttr<mAttr::Mode>(); }
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
@@ -86,11 +90,12 @@ std::shared_ptr<Node> DepthToSpace(const std::uint32_t blockSize,
                                     const DepthToSpace_Op::Mode mode = DepthToSpace_Op::Mode::CRD,
                                     const std::string& name = "");
 
-}  // namespace Aidge
+} // namespace Operator
+} // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::DepthToSpaceAttr>::data[] = { "block_size", "mode" };
+const char *const EnumStrings<Aidge::Operator::DepthToSpace_Op::mAttr>::data[] = { "block_size", "mode" };
 }
 
 #endif //AIDGE_CORE_OPERATOR_DEPTHTOSPACE_H_
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 5ed9e789deab71b107a6071ab11452c3cf73fa9d..3eb0e93a0ebf5402a2ea269e0f973e19494767a7 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 class Div_Op : public OperatorTensor,
     public Registrable<Div_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Div_Op&)>> {
@@ -50,7 +51,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Div_Op
      */
-    std::shared_ptr<Operator> clone() const override {
+    std::shared_ptr<AbsOperator> clone() const override {
         return std::make_shared<Div_Op>(*this);
     }
 
@@ -69,6 +70,7 @@ public:
 
 std::shared_ptr<Node> Div(const std::string& name = "");
 
+} // namespace Operator
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_DIV_H_ */
diff --git a/include/aidge/operator/Erf.hpp b/include/aidge/operator/Erf.hpp
index 88a4bfd29e7d27e7eaea00d967e0ba631354d253..38426962f6536a01d19441f313845010bac31157 100644
--- a/include/aidge/operator/Erf.hpp
+++ b/include/aidge/operator/Erf.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 class Erf_Op : public OperatorTensor,
     public Registrable<Erf_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Erf_Op&)>> {
@@ -41,7 +42,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Erf_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
     std::set<std::string> getAvailableBackends() const override;
@@ -55,6 +56,8 @@ public:
 };
 
 std::shared_ptr<Node> Erf(const std::string& name = "");
-}
+
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_ERF_H_ */
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 592ba4e2b796ba1aede24a737e296ddf1e285499..0269e4db636fa8499b8b5d9eca22007c4ae0fe44 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -24,6 +24,8 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
+namespace Operator {
+
 class FC_Op : public OperatorTensor,
               public Registrable<FC_Op,
                                  std::string,
@@ -53,7 +55,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::FC_Op
      */
-    std::shared_ptr<Operator> clone() const override final;
+    std::shared_ptr<AbsOperator> clone() const override final;
 
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
 
@@ -86,6 +88,7 @@ public:
 
 std::shared_ptr<Node> FC(const DimSize_t inChannels, const DimSize_t outChannels, bool noBias = false, const std::string& name = "");
 
+} // namespace Operator
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_FC_H_ */
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index 517d63adc59ed848c53852697ab9f8511dfc2a2a..9e0dae9695d30a19234f63369d7a7edda5662e62 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -30,11 +30,13 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class FoldAttr { OutputDims, StrideDims, DilationDims, KernelDims };
+namespace Operator {
 
 template <DimIdx_t DIM>
 class Fold_Op : public OperatorTensor,
                 public Registrable<Fold_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Fold_Op<DIM> &)>> {
+public:
+enum class FoldAttr { OutputDims, StrideDims, DilationDims, KernelDims };
 
 public:
     static const std::string Type;
@@ -73,7 +75,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Fold_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -111,13 +113,15 @@ inline std::shared_ptr<Node> Fold(
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Fold, not supported");
     return Fold(to_array(outputDims), to_array(kernelDims), name, strideDims, dilationDims);
 }
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
-extern template class Aidge::Fold_Op<2>;
+extern template class Aidge::Operator::Fold_Op<2>;
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::FoldAttr>::data[] = {
+const char *const EnumStrings<Aidge::Operator::Fold_Op<2>::FoldAttr>::data[] = {
     "output_dims",
     "stride_dims",
     "dilation_dims",
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 80dcdd67883529c710b142b6b547d4b02e85cd44..43bfcdb53af157c9a4785d0516b0da51771eed34 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -25,27 +25,31 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
+
 class Gather_OpImpl : public OperatorImpl {
 public:
-    Gather_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Gather_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
     void forward() override;
 };
 
-enum class GatherAttr { Axis, Indices, GatheredShape };
 
 class Gather_Op : public OperatorTensor,
                 public Registrable<Gather_Op,
                                    std::string,
                                    std::function<std::shared_ptr<OperatorImpl>(const Gather_Op&)>> {
+public:
+enum class mAttr { Axis, Indices, GatheredShape };
+
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<GatherAttr,
+    using Attributes_ = StaticAttributes<mAttr,
                                             std::int8_t,
                                             std::vector<int64_t>,
                                             std::vector<DimSize_t>>;
 private:
-    template <GatherAttr e>
+    template <mAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -67,7 +71,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Gather_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -76,9 +80,9 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::int8_t& axis() const { return mAttributes -> getAttr<GatherAttr::Axis>(); }
-    inline std::vector<int64_t>& indices() const { return mAttributes -> getAttr<GatherAttr::Indices>(); }
-    inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes -> getAttr<GatherAttr::GatheredShape>(); }
+    inline std::int8_t& axis() const { return mAttributes -> getAttr<mAttr::Axis>(); }
+    inline std::vector<int64_t>& indices() const { return mAttributes -> getAttr<mAttr::Indices>(); }
+    inline std::vector<DimSize_t>& gatheredShape() const { return mAttributes -> getAttr<mAttr::GatheredShape>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "indices"};
@@ -89,11 +93,13 @@ public:
 };
 
 std::shared_ptr<Node> Gather(std::int8_t axis = 0, const std::vector<int64_t>& indices = {}, const std::vector<DimSize_t>& gatheredShape = {}, const std::string& name = "");
+
+} // namespace Operator
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"axis", "indices", "gathered_shape"};
+const char *const EnumStrings<Aidge::Operator::Gather_Op::mAttr>::data[] = {"axis", "indices", "gathered_shape"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index 89b2c06a52f180ffb35363cb6ab07d4242e12033..f5fe30308f8c9e7d490e829701b337a795dd4aa4 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -24,6 +24,8 @@
 
 
 namespace Aidge {
+namespace Operator {
+
 class GenericOperator_Op
     : public OperatorTensor,
       public Registrable<GenericOperator_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(std::shared_ptr<GenericOperator_Op>)>> {
@@ -51,7 +53,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::GenericOperator_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
 public:
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -107,6 +109,8 @@ std::shared_ptr<Node> GenericOperator(const std::string& type, const std::vector
  */
 std::shared_ptr<Node> GenericOperator(const std::string& type, IOIndex_t nbData, IOIndex_t nbParam, IOIndex_t nbOut,
                                              const std::string& name = "");
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_GENERICOPERATOR_H_ */
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index ef440e8c697ff221aa8df42e459de7ac697e8a0c..6c2759269fb9cf2621099dc023eef57e95acfff7 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 /**
  * @brief Description for the tensor data structure.
@@ -41,7 +42,7 @@ public:
 
   GlobalAveragePooling_Op(const GlobalAveragePooling_Op &op);
 
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -58,6 +59,7 @@ public:
 
 std::shared_ptr<Node> GlobalAveragePooling(const std::string &name = "");
 
+} // namespace Operator
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_GLOBAL_AVERAGE_POOLING_H_ */
diff --git a/include/aidge/operator/GridSample.hpp b/include/aidge/operator/GridSample.hpp
index dc2b2059e75711572e0f7fa94cc6ccb9f58c970b..a4384fe3cb9cf0e2bcb0bf8ef1017373952c3341 100644
--- a/include/aidge/operator/GridSample.hpp
+++ b/include/aidge/operator/GridSample.hpp
@@ -24,6 +24,7 @@
 #include "aidge/utils/StaticAttributes.hpp"
 
 namespace Aidge {
+namespace Operator {
 
 enum class GridSampleAttr { Mode, PaddingMode, AlignCorners };
 
@@ -53,7 +54,7 @@ public:
 
 public:
 
-	std::shared_ptr<Operator> clone() const override;
+	std::shared_ptr<AbsOperator> clone() const override;
 
 	bool forwardDims(bool /*allowDataDependencies*/ = false) override final;
 
@@ -79,6 +80,7 @@ std::shared_ptr<Node> GridSample(
                         bool alignCorners = false,
                         const std::string& name = "");
 
+} // namespace Operator
 } // namespace Aidge
 
 
diff --git a/include/aidge/operator/ILayerNorm.hpp b/include/aidge/operator/ILayerNorm.hpp
index f660cc64eb65770cc6cf5335d9c070b155d03c0f..37b644862a0e3162c203f0d027fd3e64441259c3 100644
--- a/include/aidge/operator/ILayerNorm.hpp
+++ b/include/aidge/operator/ILayerNorm.hpp
@@ -26,6 +26,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 class ILayerNorm_Op : public OperatorTensor,
     public Registrable<ILayerNorm_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ILayerNorm_Op&)>> {
@@ -54,7 +55,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ILayerNorm_Op
      */
-    std::shared_ptr<Operator> clone() const override {
+    std::shared_ptr<AbsOperator> clone() const override {
         return std::make_shared<ILayerNorm_Op>(*this);
     }
 
@@ -76,6 +77,8 @@ public:
 inline std::shared_ptr<Node> ILayerNorm(const std::string& name = "") {
     return std::make_shared<Node>(std::make_shared<ILayerNorm_Op>(), name);
 }
-}
+
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_ILAYERNORM_H_ */
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index 24476f231806bf38ae48b9e2d5ec405e072afdb2..079718cf8c7bf91e93c343997e5aeb581f25db58 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -26,6 +26,8 @@
 #include "aidge/utils/ErrorHandling.hpp"
 
 namespace Aidge {
+namespace Operator {
+
 class Identity_OpImpl : public OperatorImpl {
 public:
-    Identity_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Identity_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
@@ -57,7 +59,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Identity_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
     std::set<std::string> getAvailableBackends() const override;
@@ -72,6 +74,7 @@ public:
 
 std::shared_ptr<Node> Identity(const std::string& name = "");
 
-}
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_IDENTITY_H_ */
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 179eb90b39bb5d527781289b9b233d3a29d14494..97bbdaba83c777d5124a2a149d2c24a1bef77890 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -24,18 +24,19 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class LeakyReLUAttr {
-    NegativeSlope
-};
+namespace Operator {
 
 class LeakyReLU_Op : public OperatorTensor,
     public Registrable<LeakyReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const LeakyReLU_Op&)>> {
+public:
+    enum class mAttr { NegativeSlope };
+
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<LeakyReLUAttr, float>;
-    template <LeakyReLUAttr e> using attr = typename Attributes_::template attr<e>;
+    using Attributes_ = StaticAttributes<mAttr, float>;
+    template <mAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -46,7 +47,7 @@ public:
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(
             std::make_shared<Attributes_>(
-                attr<LeakyReLUAttr::NegativeSlope>(negativeSlope)))
+                attr<mAttr::NegativeSlope>(negativeSlope)))
     {}
 
     /**
@@ -59,13 +60,13 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::LeakyReLU_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<LeakyReLUAttr::NegativeSlope>(); }
+    inline float& negativeSlope() const noexcept { return mAttributes -> getAttr<mAttr::NegativeSlope>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -76,11 +77,13 @@ public:
 };
 
 std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::string& name = "");
-}
+
+} // namespace Operator
+} // namespace Aidge
 
 namespace {
 template <>
-const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
+const char* const EnumStrings<Aidge::Operator::LeakyReLU_Op::mAttr>::data[]
     = {"negative_slope"};
 }
 
diff --git a/include/aidge/operator/Ln.hpp b/include/aidge/operator/Log.hpp
similarity index 77%
rename from include/aidge/operator/Ln.hpp
rename to include/aidge/operator/Log.hpp
index 22fc51664b89bcdeb5970b0cc92beafdde52e43f..6d3023aa60641d51c6b2c7d488705b68181221a5 100755
--- a/include/aidge/operator/Ln.hpp
+++ b/include/aidge/operator/Log.hpp
@@ -24,25 +24,26 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
-class Ln_Op : public OperatorTensor,
-    public Registrable<Ln_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Ln_Op&)>> {
+class Log_Op : public OperatorTensor,
+    public Registrable<Log_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Log_Op&)>> {
 public:
     static const std::string Type;
 
-    Ln_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
+    Log_Op() : OperatorTensor(Type, {InputCategory::Data}, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    Ln_Op(const Ln_Op& op);
+    Log_Op(const Log_Op& op);
 
     /**
      * @brief Clone the operator using its copy-constructor.
-     * @see Operator::Ln_Op
+     * @see Operator::Log_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -56,7 +57,9 @@ public:
     }
 };
 
-std::shared_ptr<Node> Ln(const std::string& name = "");
-}
+std::shared_ptr<Node> Log(const std::string& name = "");
+
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_LN_H_ */
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index bf6ab84c7373962e71434050427c9b6ecae3b034..f6be6d423b4b8a2418170259504e69c13b8fb98b 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -22,6 +22,7 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
+namespace Operator {
 
 class MatMul_Op : public OperatorTensor,
               public Registrable<MatMul_Op,
@@ -42,7 +43,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MatMul_Op
      */
-    std::shared_ptr<Operator> clone() const override final;
+    std::shared_ptr<AbsOperator> clone() const override final;
 
     /**
      * @brief Compute dimensions for the output Tensor following the same rules as
@@ -70,6 +71,8 @@ public:
 };
 
 std::shared_ptr<Node> MatMul(const std::string& name = "");
+
+} // namespace Operator
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_MATMUL_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 0cc43a6fbe50849b169a59d048962668d3e4666c..48d1861dc2e593a98b0841499822cfcee9ef46e6 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -29,21 +29,25 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class MaxPoolingAttr { StrideDims, KernelDims, CeilMode };
+namespace Operator {
+
 
 template <DimIdx_t DIM>
 class MaxPooling_Op : public OperatorTensor,
                 public Registrable<MaxPooling_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const MaxPooling_Op<DIM> &)>> {
+public:
+enum class mAttr { StrideDims, KernelDims, CeilMode };
+
 public:
     static const std::string Type;
 
-    using Attributes_ = StaticAttributes<MaxPoolingAttr,
+    using Attributes_ = StaticAttributes<mAttr,
                                              std::array<DimSize_t, DIM>,
                                              std::array<DimSize_t, DIM>,
                                              bool>;
 
 private:
-    template <MaxPoolingAttr e>
+    template <mAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -64,7 +68,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MaxPooling_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -72,9 +76,9 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<MaxPoolingAttr::StrideDims>(); }
-    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<MaxPoolingAttr::KernelDims>(); }
-    inline bool& ceilMode() const { return mAttributes->template getAttr<MaxPoolingAttr::CeilMode>(); }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<mAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<mAttr::KernelDims>(); }
+    inline bool& ceilMode() const { return mAttributes->template getAttr<mAttr::CeilMode>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -104,11 +108,16 @@ inline std::shared_ptr<Node> MaxPooling(
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by MaxPooling, not supported");
     return MaxPooling(to_array(kernel_dims), name, stride_dims, ceil_mode);
 }
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "kernel_dims", "ceil_mode"};
+const char *const EnumStrings<Aidge::Operator::MaxPooling_Op<1>::mAttr>::data[] = {"stride_dims", "kernel_dims", "ceil_mode"};
+
+template <>
+const char *const EnumStrings<Aidge::Operator::MaxPooling_Op<2>::mAttr>::data[] = {"stride_dims", "kernel_dims", "ceil_mode"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index 2b05b5fffed98a7df99a450a5f99c88efa2f7288..e52a3b893ffc279b39324149d58670f0db800166 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -25,9 +25,11 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
+
 class Memorize_ProdConso : public ProdConso {
 public:
-    Memorize_ProdConso(const Operator& op): ProdConso(op) {}
+    Memorize_ProdConso(const AbsOperator& op): ProdConso(op) {}
     Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override final;
     Elts_t getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const override final;
     void updateConsummerProducer() override;
@@ -35,21 +37,24 @@ public:
 
 class Memorize_OpImpl : public OperatorImpl {
 public:
-    Memorize_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Memorize_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
     std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Memorize_ProdConso>(mOp); };
     void forward() override;
 };
 
-enum class MemorizeAttr { ScheduleStep, ForwardStep, EndStep };
 
 class Memorize_Op : public OperatorTensor,
     public Registrable<Memorize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Memorize_Op&)>> {
+
+public:
+enum class mAttr { ScheduleStep, ForwardStep, EndStep };
+
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<MemorizeAttr, std::uint32_t, std::uint32_t, std::uint32_t>;
-    template <MemorizeAttr e>
+    using Attributes_ = StaticAttributes<mAttr, std::uint32_t, std::uint32_t, std::uint32_t>;
+    template <mAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -69,7 +74,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Memorize_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
     std::set<std::string> getAvailableBackends() const override;
@@ -80,9 +85,9 @@ public:
     void forward() override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::uint32_t& scheduleStep() const { return mAttributes->template getAttr<MemorizeAttr::ScheduleStep>(); }
-    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<MemorizeAttr::ForwardStep>(); }
-    inline std::uint32_t& endStep() const { return mAttributes->template getAttr<MemorizeAttr::EndStep>(); }
+    inline std::uint32_t& scheduleStep() const { return mAttributes->template getAttr<mAttr::ScheduleStep>(); }
+    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<mAttr::ForwardStep>(); }
+    inline std::uint32_t& endStep() const { return mAttributes->template getAttr<mAttr::EndStep>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "data_input_init"};
@@ -93,11 +98,13 @@ public:
 };
 
 std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::string& name = "");
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = {
+const char *const EnumStrings<Aidge::Operator::Memorize_Op::mAttr>::data[] = {
     "schedule_step",
     "forward_step",
     "end_step"
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 744dbd1327a83267b7840e03ba83190326ee6cdd..d4d476f35e689b7d42c16a35cdf8bd0c3479500a 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -27,6 +27,8 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
+
 class MetaOperator_Op : public OperatorTensor,
                 public Registrable<MetaOperator_Op, std::array<std::string, 2>, std::function<std::unique_ptr<OperatorImpl>(const MetaOperator_Op &)>> {
 public:
@@ -59,7 +61,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MetaOperator_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     inline const std::shared_ptr<GraphView>& getMicroGraph() const noexcept {
         return mGraph;
@@ -115,6 +117,8 @@ std::shared_ptr<Node> MetaOperator(const char *type,
                                   const std::shared_ptr<GraphView>& graph,
                                   const std::vector<InputCategory>& forcedInputsCategory = {},
                                   const std::string& name = "");
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* MetaOperator_H_ */
diff --git a/include/aidge/operator/MetaOperatorDefs.hpp b/include/aidge/operator/MetaOperatorDefs.hpp
index 750a808aaeb23447578501f8b27c7eba3d34234c..e56f5848fd36e10c154e6ca3841712860413f46d 100644
--- a/include/aidge/operator/MetaOperatorDefs.hpp
+++ b/include/aidge/operator/MetaOperatorDefs.hpp
@@ -30,7 +30,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-
+namespace Operator {
 
 template <std::array<DimSize_t, 1>::size_type DIM>
 extern std::shared_ptr<Node> PaddedConv(DimSize_t in_channels,
@@ -165,6 +165,7 @@ std::shared_ptr<Node> LSTM(DimSize_t in_channels,
 
 std::shared_ptr<MetaOperator_Op> LSTM_Op(DimSize_t seq_length);
 
-}  // namespace Aidge
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_METAOPERATORDEFS_H_ */
diff --git a/include/aidge/operator/Move.hpp b/include/aidge/operator/Move.hpp
index 49d92cd12f68a0b23530039c1df70ced9b2d2080..15dfb2da2b7c51de77524410bef9c2304e28bae2 100644
--- a/include/aidge/operator/Move.hpp
+++ b/include/aidge/operator/Move.hpp
@@ -24,9 +24,11 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
+
 class Move_OpImpl : public OperatorImpl {
 public:
-    Move_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Move_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
     void forward() override;
 };
 
@@ -47,7 +49,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Move_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
     std::set<std::string> getAvailableBackends() const override;
@@ -62,6 +64,7 @@ public:
 
 std::shared_ptr<Node> Move(const std::string& name = "");
 
+} // namespace Operator
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_MOVE_H_ */
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index bfe4fcb0de1cb7dda4a0ea8fc7b99638bc813f47..4997d259a70c4d003faf6c25244baed168c3f421 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 /**
  * @brief Tensor element-wise multiplication.
@@ -45,7 +46,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Mul_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -62,6 +63,7 @@ public:
 
 std::shared_ptr<Node> Mul(const std::string& name = "");
 
+} // namespace Operator
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_MUL_H_ */
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index a799153e1db5eb83964ed06dd3bc0fb06da64de8..61e8b18b7d5a479b101bc86281b2d9eb766f652e 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -33,7 +33,9 @@
 #ifdef PYBIND
 namespace py = pybind11;
 #endif
+
 namespace Aidge {
+namespace Operator {
 
 enum class OperatorType {
     Data,
@@ -47,7 +49,7 @@ enum class InputCategory {
     OptionalParam
 };
 
-class Operator : public std::enable_shared_from_this<Operator> {
+class AbsOperator : public std::enable_shared_from_this<AbsOperator> {
 protected:
     std::shared_ptr<OperatorImpl> mImpl; // implementation of the operator
     std::shared_ptr<DynamicAttributes> mInheritedAttrs;
@@ -60,8 +62,8 @@ private:
     std::set<IOIndex_t> mBackEdges;
 
 public:
-    Operator() = delete;
-    Operator(const std::string& type, const std::vector<InputCategory>& inputsCategory, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)
+    AbsOperator() = delete;
+    AbsOperator(const std::string& type, const std::vector<InputCategory>& inputsCategory, const IOIndex_t nbOut, const OperatorType operatorType = OperatorType::Data)
     : mType(type),
       mOperatorType(operatorType),
       mInputsCategory(inputsCategory),
@@ -70,8 +72,8 @@ public:
         // ctor
     }
 
-    Operator(const Operator& op):
-        std::enable_shared_from_this<Operator>(),
+    AbsOperator(const AbsOperator& op):
+        std::enable_shared_from_this<AbsOperator>(),
         mOperatorType(op.mOperatorType),
         mInputsCategory(op.mInputsCategory),
         mNbOut(op.mNbOut),
@@ -82,15 +84,15 @@ public:
         // Implementation is never cloned. It is up to the non-abstract Operator copy-constructor to create a new implementation matching the copied Operator implementation.
         // See https://gitlab.eclipse.org/eclipse/aidge/aidge_core/-/merge_requests/8#note_1214050 for the discussion.
     }
-    std::shared_ptr<Operator> operator()(std::shared_ptr<DynamicAttributes> attrs) {
+    std::shared_ptr<AbsOperator> operator()(std::shared_ptr<DynamicAttributes> attrs) {
         mInheritedAttrs = attrs;
         return shared_from_this();
     }
 
-    virtual ~Operator() noexcept;
+    virtual ~AbsOperator() noexcept;
 
 public:
-    virtual std::shared_ptr<Operator> clone() const = 0;
+    virtual std::shared_ptr<AbsOperator> clone() const = 0;
 
     virtual std::shared_ptr<Attributes> attributes() const { return nullptr; };
     virtual std::shared_ptr<DynamicAttributes> inheritedAttributes() const { return mInheritedAttrs; };
@@ -239,6 +241,8 @@ public:
     }
 #endif
 };
+
+} // namespace Operator
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_OPERATOR_H_ */
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index c8cdd93810e18bd3cdd0a2d080e54aae2d787c66..869f1355a5bfc75fcb45127669e032cdd074d897 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -21,9 +21,10 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 class Tensor;
-class OperatorTensor : public Operator {
+class OperatorTensor : public AbsOperator {
     /* TODO: Add an attribute specifying the type of Data used by the Operator.
      * The same way ``Type`` attribute specifies the type of Operator. Hence this
      * attribute could be checked in the forwardDims function to assert Operators
@@ -43,7 +44,7 @@ public:
     /**
      * @brief Operator tensor constructor. This function is not meant to be called directly but by a derived class constructor
      * every operator class derive from this class.
-     * 
+     *
 	 * @param[in] type     : type of operator (i.e. "Add", "AveragePool",...)
 	 * @param[in] inputsCategory : describes the type of each input.
 	 * @param[in] nbOut    : Number of tensors this operator will output
@@ -94,7 +95,7 @@ public:
  	 *        - TOKEN mode means that forwarddims will only ensure that all inputs and outputs of the graph the node is within are connected.
  	 * @param[in] allowDataDependency if set to true, this means that this operator output dimensions depends on the dimensions of optionnal parameter tensors.
  	 * @return true if dims have been properly forwarded. false otherwise. If set to false, then forwardDims will enter in token mode.
- 	 *      
+ 	 *
      */
     virtual bool forwardDims(bool allowDataDependency = false);
     virtual bool dimsForwarded() const;
@@ -108,6 +109,8 @@ public:
 protected:
     bool inputsAssociated(bool checkNonEmpty = true) const;
 };
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
 #endif  // AIDGE_CORE_OPERATOR_OPERATORTENSOR_H_
\ No newline at end of file
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 2c670bf23d4703a5a9e8502c8b356fdde32e2561..85b298ac546fe2bfcb3445f9fbf069c49cb25ca0 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -24,21 +24,25 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class PadAttr { BeginEndBorders, BorderType, BorderValue };
-enum class PadBorderType { Constant, Edge, Reflect, Wrap };
+namespace Operator {
+
 
 template <DimIdx_t DIM>
 class Pad_Op : public OperatorTensor,
                 public Registrable<Pad_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pad_Op<DIM> &)>> {
+public:
+enum class mAttr { BeginEndBorders, BorderType, BorderValue };
+enum class BorderType { Constant, Edge, Reflect, Wrap };
+
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<PadAttr,
+    using Attributes_ = StaticAttributes<mAttr,
                                             std::array<DimSize_t, 2*DIM>,
-                                            PadBorderType,
+                                            BorderType,
                                             double>;
-    template <PadAttr e>
+    template <mAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -47,13 +51,13 @@ public:
     Pad_Op() = delete;
 
     constexpr Pad_Op(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
-                     const PadBorderType &borderType = PadBorderType::Constant,
+                     const BorderType &borderType = BorderType::Constant,
                      double borderValue = 0.0)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
-            attr<PadAttr::BeginEndBorders>(beginEndTuples),
-            attr<PadAttr::BorderType>(borderType),
-            attr<PadAttr::BorderValue>(borderValue))) {}
+            attr<mAttr::BeginEndBorders>(beginEndTuples),
+            attr<mAttr::BorderType>(borderType),
+            attr<mAttr::BorderValue>(borderValue))) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
@@ -68,7 +72,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Pad_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
@@ -77,9 +81,9 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::array<DimSize_t, 2*DIM>& beginEndBorders() const noexcept { return mAttributes->template getAttr<PadAttr::BeginEndBorders>(); }
-    inline PadBorderType& borderType() const noexcept { return mAttributes->template getAttr<PadAttr::BorderType>(); }
-    inline double& borderValue() const noexcept { return mAttributes->template getAttr<PadAttr::BorderValue>(); }
+    inline std::array<DimSize_t, 2*DIM>& beginEndBorders() const noexcept { return mAttributes->template getAttr<mAttr::BeginEndBorders>(); }
+    inline BorderType& borderType() const noexcept { return mAttributes->template getAttr<mAttr::BorderType>(); }
+    inline double& borderValue() const noexcept { return mAttributes->template getAttr<mAttr::BorderValue>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -92,7 +96,7 @@ public:
 template <std::array<DimSize_t, 1>::size_type DIM>
 std::shared_ptr<Node> Pad(const std::array<DimSize_t, 2*DIM> &beginEndTuples,
                         const std::string& name = "",
-                        const PadBorderType &borderType = PadBorderType::Constant,
+                        const BorderType &borderType = BorderType::Constant,
                         double borderValue = 0.0);
 
 // helper with C-style array instead of std::array for beginEndTuples to allow automatic template DIM deduction
@@ -100,22 +104,30 @@ template <DimSize_t DIM>
 inline std::shared_ptr<Node> Pad(
     DimSize_t const (&beginEndTuples)[2*DIM],
     const std::string& name = "",
-    const PadBorderType &borderType = PadBorderType::Constant,
+    const BorderType &borderType = BorderType::Constant,
     double borderValue = 0.0)
 {
     return Pad<DIM>(to_array(beginEndTuples), name, borderType, borderValue);
 }
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
-extern template class Aidge::Pad_Op<1>;
-extern template class Aidge::Pad_Op<2>;
+extern template class Aidge::Operator::Pad_Op<1>;
+extern template class Aidge::Operator::Pad_Op<2>;
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::PadAttr>::data[] = {"begin_end_borders", "border_type", "border_value"};
+const char *const EnumStrings<Aidge::Operator::Pad_Op<1>::mAttr>::data[] = {"begin_end_borders", "border_type", "border_value"};
+
+template <>
+const char *const EnumStrings<Aidge::Operator::Pad_Op<2>::mAttr>::data[] = {"begin_end_borders", "border_type", "border_value"};
 
 template <>
-const char *const EnumStrings<Aidge::PadBorderType>::data[] = {"Constant", "Edge", "Reflect", "Wrap"};
+const char *const EnumStrings<Aidge::Operator::Pad_Op<1>::BorderType>::data[] = {"Constant", "Edge", "Reflect", "Wrap"};
+
+template <>
+const char *const EnumStrings<Aidge::Operator::Pad_Op<2>::BorderType>::data[] = {"Constant", "Edge", "Reflect", "Wrap"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_PAD_H_ */
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index d5898b3630721b036b3acb916e6dec87455009f7..33114d4a60131bd160d3696360865116bf70080c 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -24,29 +24,34 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
+
 class Pop_ProdConso : public ProdConso {
 public:
-    Pop_ProdConso(const Operator& op): ProdConso(op) {}
+    Pop_ProdConso(const AbsOperator& op): ProdConso(op) {}
     Elts_t getNbRequiredData(const IOIndex_t inputIdx) const override;
 };
 
 class Pop_OpImpl : public OperatorImpl {
 public:
-    Pop_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Pop_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
     std::shared_ptr<ProdConso> getProdConso() const override { return std::make_shared<Pop_ProdConso>(mOp); };
     void forward() override;
 };
 
-enum class PopAttr { ForwardStep };
 
 class Pop_Op : public OperatorTensor,
     public Registrable<Pop_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Pop_Op&)>> {
+
+public:
+enum class mAttr { ForwardStep };
+
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<PopAttr, std::uint32_t>;
-    template <PopAttr e> using attr = typename Attributes_::template attr<e>;
+    using Attributes_ = StaticAttributes<mAttr, std::uint32_t>;
+    template <mAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -62,7 +67,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Pop_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
     std::set<std::string> getAvailableBackends() const override;
@@ -72,7 +77,7 @@ public:
     void forward() override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<PopAttr::ForwardStep>(); }
+    inline std::uint32_t& forwardStep() const { return mAttributes->template getAttr<mAttr::ForwardStep>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -83,11 +88,13 @@ public:
 };
 
 std::shared_ptr<Node> Pop(const std::string& name = "");
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::PopAttr>::data[] = {
+const char *const EnumStrings<Aidge::Operator::Pop_Op::mAttr>::data[] = {
     "forward_step"
 };
 }
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index f6762dd33088f486184bdfd0a5b8dbdbd0c641da..8ed5a20f8a6329651e84d85e5d3c01fc24798847 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 class Pow_Op : public OperatorTensor,
     public Registrable<Pow_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Pow_Op&)>> {
@@ -49,7 +50,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Pow_Op
      */
-    std::shared_ptr<Operator> clone() const override {
+    std::shared_ptr<AbsOperator> clone() const override {
         return std::make_shared<Pow_Op>(*this);
     }
 
@@ -68,6 +69,8 @@ public:
 };
 
 std::shared_ptr<Node> Pow(const std::string& name = "");
+
+} // namespace Operator
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_POW_H_ */
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 115ddcb5549b1c0daa01b3ab67946655cda7287c..db8561982daf6290d8b2d6083fe814e81aa72b4f 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -25,19 +25,21 @@
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-
-enum class ProdAttr { Constant };
+namespace Operator {
 
 class Producer_Op
     : public OperatorTensor,
       public Registrable<Producer_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(
                                           const Producer_Op &)>> {
+public:
+enum class mAttr { Constant };
+
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ProdAttr, bool>;
-    template <ProdAttr e> using attr = typename Attributes_::template attr<e>;
+    using Attributes_ = StaticAttributes<mAttr, bool>;
+    template <mAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -75,7 +77,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Producer_Op(const Producer_Op&)
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     void associateInput(const IOIndex_t /*inputIdx*/, const std::shared_ptr<Data>& /*data*/) override final {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input.");
@@ -92,7 +94,7 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline bool& constant() const { return mAttributes->template getAttr<ProdAttr::Constant>(); }
+    inline bool& constant() const { return mAttributes->template getAttr<mAttr::Constant>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {};
@@ -132,11 +134,13 @@ template <std::size_t DIM>
 std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOIndex_t inputIdx, DimSize_t const (&dims)[DIM], const std::string& extension) {
     return addProducer(otherNode, inputIdx, to_array(dims), extension);
 }
+
+} // namespace Operator
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ProdAttr>::data[] = {
+const char *const EnumStrings<Aidge::Operator::Producer_Op::mAttr>::data[] = {
     "constant"
 };
 }
diff --git a/include/aidge/operator/ReLU.hpp b/include/aidge/operator/ReLU.hpp
index 9b264c1d3d7955f71538dd90f105cfd7ee469d0a..cc0eee6e04b99db328c007f1309a054bc1b561d2 100644
--- a/include/aidge/operator/ReLU.hpp
+++ b/include/aidge/operator/ReLU.hpp
@@ -24,6 +24,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 class ReLU_Op : public OperatorTensor,
     public Registrable<ReLU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReLU_Op&)>> {
@@ -42,7 +43,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ReLU_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -57,6 +58,8 @@ public:
 };
 
 std::shared_ptr<Node> ReLU(const std::string& name = "");
-}
+
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 5d5895a8fb279f1efa5c6321614199f44402b83a..86f947d7656d34e3418498e98155e6ad464a2f75 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -26,7 +26,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ReduceMeanAttr { Axes, KeepDims, NoopWithEmptyAxes };
+namespace Operator {
 
 /**
  * @brief This operator has as purpose to reduce given axes by replacing with the mean value.
@@ -34,15 +34,18 @@ enum class ReduceMeanAttr { Axes, KeepDims, NoopWithEmptyAxes };
 class ReduceMean_Op : public OperatorTensor,
                 public Registrable<ReduceMean_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceMean_Op &)>> {
 
+public:
+enum class mAttr { Axes, KeepDims, NoopWithEmptyAxes };
+
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ReduceMeanAttr,
+    using Attributes_ = StaticAttributes<mAttr,
                                             std::vector<std::int32_t>,
                                             bool,
                                             bool>;
-    template <ReduceMeanAttr e>
+    template <mAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -52,7 +55,7 @@ public:
     /**
      * @brief constructor for ReduceMean op
      * @param[in] axes around which perform the operation
-     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and 
+     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and
      * if false we remove the dimension completely
      * @param[in] noop_with_empty_axes used when no axes are provided, if set to true, the operator does nothing
      * and if false, we reduce on all axes
@@ -69,7 +72,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ReduceMean_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -77,9 +80,9 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::Axes>(); }
-    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::KeepDims>(); }
-    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceMeanAttr::NoopWithEmptyAxes>(); }
+    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<mAttr::Axes>(); }
+    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<mAttr::KeepDims>(); }
+    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<mAttr::NoopWithEmptyAxes>(); }
 
 
     static const std::vector<std::string> getInputsName() {
@@ -120,11 +123,12 @@ std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
                                         bool noop_with_empty_axes=false,
                                         const std::string& name = "");
 
-}  // namespace Aidge
+} // namespace Operator
+} // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
+const char *const EnumStrings<Aidge::Operator::ReduceMean_Op::mAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
index bae03cb7d2e3ac855537eb22e54bf706ec0e0b4a..25ec686c77d3e3e1ab0490f2126b39a015b50ee0 100644
--- a/include/aidge/operator/ReduceSum.hpp
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -26,8 +26,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ReduceSumAttr { Axes, KeepDims, NoopWithEmptyAxes };
-
+namespace Operator {
 
 /**
  * @brief This operator has as purpose to reduce given axes by replacing with the sum value.
@@ -35,15 +34,18 @@ enum class ReduceSumAttr { Axes, KeepDims, NoopWithEmptyAxes };
 class ReduceSum_Op : public OperatorTensor,
                 public Registrable<ReduceSum_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ReduceSum_Op &)>> {
 
+public:
+enum class mAttr { Axes, KeepDims, NoopWithEmptyAxes };
+
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ReduceSumAttr,
+    using Attributes_ = StaticAttributes<mAttr,
                                             std::vector<std::int32_t>,
                                             bool,
                                             bool>;
-    template <ReduceSumAttr e>
+    template <mAttr e>
     using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
@@ -53,7 +55,7 @@ public:
     /**
      * @brief constructor for ReduceSum op
      * @param[in] axes around which perform the operation
-     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and 
+     * @param[in] keep_dims if true we set a dimension of 1 in the place of the reduced axes and
      * if false we remove the dimension completely
      * @param[in] noop_with_empty_axes used when no axes are provided, if set to true, the operator does nothing
      * and if false, we reduce on all axes
@@ -61,9 +63,9 @@ public:
     ReduceSum_Op(const std::vector<std::int32_t>& axes, bool keep_dims, bool noop_with_empty_axes)
         : OperatorTensor(Type, {InputCategory::Data}, 1),
           mAttributes(std::make_shared<Attributes_>(
-            attr<ReduceSumAttr::Axes>(axes),
-            attr<ReduceSumAttr::KeepDims>(keep_dims),
-            attr<ReduceSumAttr::NoopWithEmptyAxes>(noop_with_empty_axes)))
+            attr<mAttr::Axes>(axes),
+            attr<mAttr::KeepDims>(keep_dims),
+            attr<mAttr::NoopWithEmptyAxes>(noop_with_empty_axes)))
     {}
 
     /**
@@ -85,7 +87,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ReduceSum_Op
      */
-    std::shared_ptr<Operator> clone() const override {
+    std::shared_ptr<AbsOperator> clone() const override {
         return std::make_shared<ReduceSum_Op>(*this);
     }
 
@@ -95,9 +97,9 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::Axes>(); }
-    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::KeepDims>(); }
-    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<ReduceSumAttr::NoopWithEmptyAxes>(); }
+    inline std::vector<std::int32_t>& axes() const noexcept { return mAttributes -> getAttr<mAttr::Axes>(); }
+    inline bool& keepDims() const noexcept { return mAttributes -> getAttr<mAttr::KeepDims>(); }
+    inline bool& noopWithEmptyAxes() const noexcept { return mAttributes -> getAttr<mAttr::NoopWithEmptyAxes>(); }
 
 
     static const std::vector<std::string> getInputsName() {
@@ -126,11 +128,13 @@ inline std::shared_ptr<Node> ReduceSum(const std::vector<std::int32_t> &axes={},
     return std::make_shared<Node>(std::make_shared<ReduceSum_Op>(axes, keep_dims, noop_with_empty_axes), name);
 
 }
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ReduceSumAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
+const char *const EnumStrings<Aidge::Operator::ReduceSum_Op::mAttr>::data[] = {"axes", "keep_dims", "noop_with_empty_axes"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCESUM_H_ */
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 721b964d3ff4cd87121d43e8719a8fde1445761b..cc471c15b7fe1357a07da9985bfec6448acf935f 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -23,25 +23,28 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
+
 class Reshape_OpImpl : public OperatorImpl {
 public:
-    Reshape_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Reshape_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
     void forward() override;
 };
 
-enum class ReshapeAttr { Shape, AllowZero };
-
 class Reshape_Op : public OperatorTensor,
                    public Registrable<Reshape_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Reshape_Op&)>> {
 
+public:
+enum class mAttr { Shape, AllowZero };
+
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ReshapeAttr,
+    using Attributes_ = StaticAttributes<mAttr,
                                             std::vector<std::int64_t>,
                                             bool>;
-    template <ReshapeAttr e> using attr = typename Attributes_::template attr<e>;
+    template <mAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -59,7 +62,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Reshape_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -68,8 +71,8 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::vector<std::int64_t>& shape() const { return mAttributes->template getAttr<ReshapeAttr::Shape>(); }
-    inline bool& allowZero() const { return mAttributes->template getAttr<ReshapeAttr::AllowZero>(); }
+    inline std::vector<std::int64_t>& shape() const { return mAttributes->template getAttr<mAttr::Shape>(); }
+    inline bool& allowZero() const { return mAttributes->template getAttr<mAttr::AllowZero>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -82,11 +85,13 @@ public:
 std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {},
                             bool allowzero = false,
                             const std::string &name = "");
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = { "shape", "allow_zero" };
+const char *const EnumStrings<Aidge::Operator::Reshape_Op::mAttr>::data[] = { "shape", "allow_zero" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_RESHAPE_H_ */
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index a48b95aff2a18750d83f12a62c408ad41b20afee..e55e640bbe5cfb0d05569a818be091499ec9575e 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 class Resize_Op : public OperatorTensor,
                   public Registrable<Resize_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Resize_Op&)>>{
@@ -43,7 +44,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Resize_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -62,7 +63,8 @@ public:
 
 std::shared_ptr<Node> Resize(const std::string &name = "");
 
-}  // namespace Aidge
+} // namespace Operator
+} // namespace Aidge
 
 
 #endif /* AIDGE_CORE_OPERATOR_Resize_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Round.hpp b/include/aidge/operator/Round.hpp
index 00352421d193eff543f1351b57f8db54ac742393..2031b735e26efca4f5d783396097f6971f7615fa 100644
--- a/include/aidge/operator/Round.hpp
+++ b/include/aidge/operator/Round.hpp
@@ -23,13 +23,12 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 class Round_Op : public OperatorTensor,
                 public Registrable<Round_Op,
                                 std::string,
                                 std::function<std::shared_ptr<OperatorImpl>(const Round_Op&)>> {
-
-
 public:
     static const std::string Type;
 
@@ -45,7 +44,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Round_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
     std::set<std::string> getAvailableBackends() const override;
@@ -58,7 +57,8 @@ public:
 };
 
 std::shared_ptr<Node> Round(const std::string& name = "");
-}
 
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_ROUND_H_ */
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 4ef39f63a2f9af34cd3fe28b01cf2fc195bdfc6e..9a0afff932c74c8c17260388759da6c940416cf1 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -24,19 +24,22 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class ScalingAttr {
-    ScalingFactor, QuantizedNbBits, IsOutputUnsigned
-};
+namespace Operator {
 
 class Scaling_Op
     : public OperatorTensor,
       public Registrable<Scaling_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Scaling_Op&)>> {
+public:
+enum class mAttr {
+    ScalingFactor, QuantizedNbBits, IsOutputUnsigned
+};
+
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ScalingAttr, float, std::size_t, bool>;
-    template <ScalingAttr e> using attr = typename Attributes_::template attr<e>;
+    using Attributes_ = StaticAttributes<mAttr, float, std::size_t, bool>;
+    template <mAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -54,15 +57,15 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Scaling_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline float& scalingFactor() const noexcept { return mAttributes -> getAttr<ScalingAttr::ScalingFactor>(); }
-    inline std::size_t& quantizedNbBits() const noexcept { return mAttributes -> getAttr<ScalingAttr::QuantizedNbBits>(); }
-    inline bool& isOutputUnsigned() const noexcept { return mAttributes -> getAttr<ScalingAttr::IsOutputUnsigned>(); }
+    inline float& scalingFactor() const noexcept { return mAttributes -> getAttr<mAttr::ScalingFactor>(); }
+    inline std::size_t& quantizedNbBits() const noexcept { return mAttributes -> getAttr<mAttr::QuantizedNbBits>(); }
+    inline bool& isOutputUnsigned() const noexcept { return mAttributes -> getAttr<mAttr::IsOutputUnsigned>(); }
 
     static const std::vector<std::string> getInputsName() {
         return {"data_input"};
@@ -81,11 +84,13 @@ std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
                                      std::size_t quantizedNbBits=8,
                                      bool isOutputUnsigned=true,
                                      const std::string& name = "");
+
+} // namespace Operator
 } // namespace Aidge
 
 namespace {
 template <>
-const char* const EnumStrings<Aidge::ScalingAttr>::data[]
+const char* const EnumStrings<Aidge::Operator::Scaling_Op::mAttr>::data[]
     = {"scaling_factor", "quantized_nb_bits", "is_output_unsigned"};
 }
 
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index cfd43fa0dd5a064ee21eafc2d0f50c12ad6e3272..d5b586ace67c9cc4d075bc2c150d10e619200eb3 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -25,25 +25,28 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
+
 class Shape_OpImpl : public OperatorImpl {
 public:
-    Shape_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Shape_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
     void forward() override;
 };
 
-enum class ShapeAttr { Start, End };
 
 class Shape_Op : public OperatorTensor,
                 public Registrable<Shape_Op,
                                    std::string,
                                    std::function<std::shared_ptr<OperatorImpl>(const Shape_Op&)>> {
+public:
+enum class mAttr { Start, End };
 
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<ShapeAttr, std::int64_t, std::int64_t>;
-    template <ShapeAttr e> using attr = typename Attributes_::template attr<e>;
+    using Attributes_ = StaticAttributes<mAttr, std::int64_t, std::int64_t>;
+    template <mAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -61,7 +64,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Shape_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -69,8 +72,8 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::int64_t& start() const noexcept { return mAttributes -> getAttr<ShapeAttr::Start>(); }
-    inline std::int64_t& end() const noexcept { return mAttributes -> getAttr<ShapeAttr::End>(); }
+    inline std::int64_t& start() const noexcept { return mAttributes -> getAttr<mAttr::Start>(); }
+    inline std::int64_t& end() const noexcept { return mAttributes -> getAttr<mAttr::End>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -81,11 +84,13 @@ public:
 };
 
 std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int64_t end = -1, const std::string& name = "");
+
+} // namespace Operator
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"start", "end"};
+const char *const EnumStrings<Aidge::Operator::Shape_Op::mAttr>::data[] = {"start", "end"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SHAPE_H_ */
diff --git a/include/aidge/operator/ShiftGELU.hpp b/include/aidge/operator/ShiftGELU.hpp
index 30f1d71e0a56d92a70830a5def81040e0c5a186c..9c199ba15af65bec4f30d90def283dca3aebef5a 100644
--- a/include/aidge/operator/ShiftGELU.hpp
+++ b/include/aidge/operator/ShiftGELU.hpp
@@ -26,6 +26,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 class ShiftGELU_Op : public OperatorTensor,
     public Registrable<ShiftGELU_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ShiftGELU_Op&)>> {
@@ -44,7 +45,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ShiftGELU_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -59,6 +60,8 @@ public:
 };
 
 std::shared_ptr<Node> ShiftGELU(const std::string& name = "");
-}
+
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_SHIFTGELU_H_ */
diff --git a/include/aidge/operator/ShiftMax.hpp b/include/aidge/operator/ShiftMax.hpp
index 9fbd81aedef1eb640a7ce805d745297edb640560..6cdea1ba483877a2db5c03c8c246d09bbb3b08f9 100644
--- a/include/aidge/operator/ShiftMax.hpp
+++ b/include/aidge/operator/ShiftMax.hpp
@@ -26,6 +26,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 class ShiftMax_Op : public OperatorTensor,
     public Registrable<ShiftMax_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const ShiftMax_Op&)>> {
@@ -44,7 +45,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::ShiftMax_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -59,6 +60,8 @@ public:
 };
 
 std::shared_ptr<Node> ShiftMax(const std::string& name = "");
-}
+
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_SHIFTMAX_H_ */
diff --git a/include/aidge/operator/Sigmoid.hpp b/include/aidge/operator/Sigmoid.hpp
index 24bc3321673f4dcffd3e3663f7e0a0e584389492..98ab4197a146de919936dfdcbbcc589f5fdb717f 100644
--- a/include/aidge/operator/Sigmoid.hpp
+++ b/include/aidge/operator/Sigmoid.hpp
@@ -24,6 +24,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 class Sigmoid_Op : public OperatorTensor,
     public Registrable<Sigmoid_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Sigmoid_Op&)>> {
@@ -34,7 +35,7 @@ public:
 
     Sigmoid_Op(const Sigmoid_Op& op);
 
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
     std::set<std::string> getAvailableBackends() const override;
@@ -48,6 +49,8 @@ public:
 };
 
 std::shared_ptr<Node> Sigmoid(const std::string& name = "");
-}
+
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_SIGMOID_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 811402420df170c011e478148cf646e6c585cc84..9f3391da43fcf6e0254a0e44ba7e8764217b81d6 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -24,22 +24,24 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-
-enum class SliceAttr { Starts, Ends, Axes, Steps };
+namespace Operator {
 
 class Slice_Op
     : public OperatorTensor,
       public Registrable<Slice_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Slice_Op &)>> {
+public:
+enum class mAttr { Starts, Ends, Axes, Steps };
+
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<SliceAttr,
+    using Attributes_ = StaticAttributes<mAttr,
                                             std::vector<std::int64_t>,
                                             std::vector<std::int64_t>,
                                             std::vector<std::int8_t>,
                                             std::vector<std::int64_t>>;
-    template <SliceAttr e> using attr = typename Attributes_::template attr<e>;
+    template <mAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -63,7 +65,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Slice_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = true) override final;
@@ -72,10 +74,10 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::vector<std::int64_t>& starts() const noexcept { return mAttributes -> getAttr<SliceAttr::Starts>(); }
-    inline std::vector<std::int64_t>& ends() const noexcept { return mAttributes -> getAttr<SliceAttr::Ends>(); }
-    inline std::vector<std::int8_t>& axes() const noexcept { return mAttributes -> getAttr<SliceAttr::Axes>(); }
-    inline std::vector<std::int64_t>& steps() const noexcept { return mAttributes -> getAttr<SliceAttr::Steps>(); }
+    inline std::vector<std::int64_t>& starts() const noexcept { return mAttributes -> getAttr<mAttr::Starts>(); }
+    inline std::vector<std::int64_t>& ends() const noexcept { return mAttributes -> getAttr<mAttr::Ends>(); }
+    inline std::vector<std::int8_t>& axes() const noexcept { return mAttributes -> getAttr<mAttr::Axes>(); }
+    inline std::vector<std::int64_t>& steps() const noexcept { return mAttributes -> getAttr<mAttr::Steps>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "starts", "ends", "axes", "steps"};
@@ -96,11 +98,13 @@ std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
                                    const std::vector<std::int8_t>& axes = {},
                                    const std::vector<std::int64_t>& steps = {},
                                    const std::string &name = "");
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "starts", "ends", "axes", "steps" };
+const char *const EnumStrings<Aidge::Operator::Slice_Op::mAttr>::data[] = { "starts", "ends", "axes", "steps" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 72ea56dd6293e416ddcca12ac38fd57d76071354..0a42fdd4b8a42a48075934e3e51c7cb897cb6872 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -24,19 +24,22 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
-enum class SoftmaxAttr { Axis };
+namespace Operator {
+
 
 class Softmax_Op : public OperatorTensor,
                 public Registrable<Softmax_Op,
                                    std::string,
                                    std::function<std::shared_ptr<OperatorImpl>(const Softmax_Op&)>> {
+public:
+enum class mAttr { Axis };
 
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<SoftmaxAttr, std::int32_t>;
-    template <SoftmaxAttr e> using attr = typename Attributes_::template attr<e>;
+    using Attributes_ = StaticAttributes<mAttr, std::int32_t>;
+    template <mAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -54,14 +57,14 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Softmax_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
 
-    inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<SoftmaxAttr::Axis>(); }
+    inline std::int32_t& axis() const noexcept { return mAttributes -> getAttr<mAttr::Axis>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -72,11 +75,13 @@ public:
 };
 
 std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name = "");
+
+} // namespace Operator
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"axis"};
+const char *const EnumStrings<Aidge::Operator::Softmax_Op::mAttr>::data[] = {"axis"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SOFTMAX_H_ */
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 8c3a111c42dfeb2b4e27269839e41f3b362bdda3..d4624624cc1695e4a94fe5801c9abd9d4ea474f6 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -24,24 +24,27 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
+
 class Split_OpImpl : public OperatorImpl {
 public:
-    Split_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Split_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
     void forward() override;
 };
 
-enum class SplitAttr { Axis, Split };
 
 class Split_Op
     : public OperatorTensor,
       public Registrable<Split_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Split_Op &)>> {
+public:
+enum class mAttr { Axis, Split };
 
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<SplitAttr, std::int8_t, std::vector<DimSize_t>>;
-    template <SplitAttr e> using attr = typename Attributes_::template attr<e>;
+    using Attributes_ = StaticAttributes<mAttr, std::int8_t, std::vector<DimSize_t>>;
+    template <mAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -62,7 +65,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Split_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
@@ -71,8 +74,8 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::int8_t& axis() const { return mAttributes->template getAttr<SplitAttr::Axis>(); }
-    inline std::vector<DimSize_t>& split() const { return mAttributes->template getAttr<SplitAttr::Split>(); }
+    inline std::int8_t& axis() const { return mAttributes->template getAttr<mAttr::Axis>(); }
+    inline std::vector<DimSize_t>& split() const { return mAttributes->template getAttr<mAttr::Split>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "split"};
@@ -92,11 +95,13 @@ std::shared_ptr<Node> Split(DimSize_t nbOutput,
                                    std::int8_t axis = 0,
                                    const std::vector<DimSize_t>& split = {},
                                    const std::string &name = "");
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::SplitAttr>::data[] = { "axis", "split" };
+const char *const EnumStrings<Aidge::Operator::Split_Op::mAttr>::data[] = { "axis", "split" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SPLIT_H_ */
diff --git a/include/aidge/operator/Sqrt.hpp b/include/aidge/operator/Sqrt.hpp
index 4858cdcd164d6be0582ddabe67c780461a9667aa..46d0739b43b6a91992d1e8fc15acf24feabad543 100644
--- a/include/aidge/operator/Sqrt.hpp
+++ b/include/aidge/operator/Sqrt.hpp
@@ -17,11 +17,13 @@
 #include <string>
 
 #include "aidge/graph/Node.hpp"
+#include "aidge/operator/Operator.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 class Sqrt_Op : public OperatorTensor,
                 public Registrable<Sqrt_Op,
@@ -42,7 +44,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Sqrt_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
     std::set<std::string> getAvailableBackends() const override;
@@ -56,6 +58,8 @@ public:
 };
 
 std::shared_ptr<Node> Sqrt(const std::string& name = "");
-}
+
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_SQRT_H_ */
diff --git a/include/aidge/operator/Squeeze.hpp b/include/aidge/operator/Squeeze.hpp
index 64a775eb4209ecad0e29decd8336ebb77bbe652f..bc31b5a22fddaf8aadc3f8034d02d0a8acc38db5 100644
--- a/include/aidge/operator/Squeeze.hpp
+++ b/include/aidge/operator/Squeeze.hpp
@@ -29,6 +29,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 /**
  * @brief implementation of the operator squeeze.
  * @note Since this operator implementation is agnostic to the backend it is
@@ -36,7 +37,7 @@ namespace Aidge {
  */
 class Squeeze_OpImpl : public OperatorImpl {
 public:
-  Squeeze_OpImpl(const Operator &op, const std::string &backend = "")
+  Squeeze_OpImpl(const AbsOperator &op, const std::string &backend = "")
       : OperatorImpl(op, backend) {}
   void forward() override;
 };
@@ -110,7 +111,7 @@ public:
    * @brief Clone the operator using its copy-constructor.
    * @see Operator::MatMul_Op
    */
-  std::shared_ptr<Operator> clone() const override final {
+  std::shared_ptr<AbsOperator> clone() const override final {
     return std::make_shared<Squeeze_Op>(*this);
   }
 
@@ -150,6 +151,7 @@ inline std::shared_ptr<Node> Squeeze(const std::vector<int8_t> axes = {},
                                      const std::string &name = "") {
   return std::make_shared<Node>(std::make_shared<Squeeze_Op>(axes), name);
 }
+} // namespace Operator
 } // namespace Aidge
 
 namespace {
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index 170baf6fd0f38668f64cbd36044c856fae261737..c1d7ed032e927db3123aa3a54424a5f1795ad84c 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -23,6 +23,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 class Sub_Op : public OperatorTensor,
     public Registrable<Sub_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Sub_Op&)>> {
@@ -42,7 +43,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Sub_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool forwardDims(bool allowDataDependency = false) override final;
 
@@ -60,6 +61,7 @@ public:
 
 std::shared_ptr<Node> Sub(const std::string& name = "");
 
+} // namespace Operator
 } // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_SUB_H_ */
diff --git a/include/aidge/operator/Tanh.hpp b/include/aidge/operator/Tanh.hpp
index f1a30e3f08ce3886cc1ca39a55a3b23979a47860..abc4296f3a104fdeade3404547f6da0acd33b44d 100644
--- a/include/aidge/operator/Tanh.hpp
+++ b/include/aidge/operator/Tanh.hpp
@@ -22,6 +22,7 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
 
 class Tanh_Op : public OperatorTensor,
     public Registrable<Tanh_Op, std::string, std::function<std::unique_ptr<OperatorImpl>(const Tanh_Op&)>> {
@@ -40,7 +41,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Tanh_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
@@ -55,6 +56,8 @@ public:
 };
 
 std::shared_ptr<Node> Tanh(const std::string& name = "");
-}
+
+} // namespace Operator
+} // namespace Aidge
 
 #endif /* AIDGE_CORE_OPERATOR_TANH_H_ */
\ No newline at end of file
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 155627f2cfd3173ccfbbe2a1ce8c23784cd06d71..447807497ca6556359aaf49bc87a94ff87fc4c66 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -25,26 +25,29 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
+
 class TransposeImpl : public OperatorImpl {
 public:
-    TransposeImpl(const Operator& op, const std::string& backend = "")
+    TransposeImpl(const AbsOperator& op, const std::string& backend = "")
         : OperatorImpl(op, backend)
     {}
     void forward() override;
 };
 
-enum class TransposeAttr { OutputDimsOrder };
 
 class Transpose_Op : public OperatorTensor,
                 public Registrable<Transpose_Op, std::string, std::function<std::shared_ptr<OperatorImpl>(const Transpose_Op&)>> {
+public:
+enum class mAttr { OutputDimsOrder };
 
 public:
     static const std::string Type;
 
 
 private:
-    using Attributes_ = StaticAttributes<TransposeAttr, std::vector<DimSize_t>>;
-    template <TransposeAttr e> using attr = typename Attributes_::template attr<e>;
+    using Attributes_ = StaticAttributes<mAttr, std::vector<DimSize_t>>;
+    template <mAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -62,7 +65,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Transpose_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -70,7 +73,7 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::vector<DimSize_t>& outputDimsOrder() const noexcept { return mAttributes -> getAttr<TransposeAttr::OutputDimsOrder>(); }
+    inline std::vector<DimSize_t>& outputDimsOrder() const noexcept { return mAttributes -> getAttr<mAttr::OutputDimsOrder>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -82,11 +85,13 @@ public:
 
 std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsOrder,
                                            const std::string& name = "");
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"output_dims_order"};
+const char *const EnumStrings<Aidge::Operator::Transpose_Op::mAttr>::data[] = {"output_dims_order"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_TRANSPOSE_H_ */
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index 09a689528a6814eca6bb56ef326e2da527f14843..f43579e60c656d11467fed6e7afad6e3803e4f18 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -30,28 +30,31 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
+
 template <DimIdx_t DIM>
 class Unfold_OpImpl : public OperatorImpl {
 public:
-    Unfold_OpImpl(const Operator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
+    Unfold_OpImpl(const AbsOperator& op, const std::string& backend = ""): OperatorImpl(op, backend) {}
     void forward() override;
 };
 
-enum class UnfoldAttr { StrideDims, DilationDims, KernelDims };
 
 template <DimIdx_t DIM>
 class Unfold_Op : public OperatorTensor,
                 public Registrable<Unfold_Op<DIM>, std::string, std::function<std::shared_ptr<OperatorImpl>(const Unfold_Op<DIM> &)>> {
+public:
+enum class mAttr { StrideDims, DilationDims, KernelDims };
 
 public:
     static const std::string Type;
 
 private:
-    using Attributes_ = StaticAttributes<UnfoldAttr,
+    using Attributes_ = StaticAttributes<mAttr,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>,
                                         std::array<DimSize_t, DIM>>;
-    template <UnfoldAttr e> using attr = typename Attributes_::template attr<e>;
+    template <mAttr e> using attr = typename Attributes_::template attr<e>;
     const std::shared_ptr<Attributes_> mAttributes;
 
 public:
@@ -72,7 +75,7 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::Unfold_Op
      */
-    std::shared_ptr<Operator> clone() const override;
+    std::shared_ptr<AbsOperator> clone() const override;
 
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
@@ -80,9 +83,9 @@ public:
     std::set<std::string> getAvailableBackends() const override;
 
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
-    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<UnfoldAttr::StrideDims>(); }
-    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<UnfoldAttr::DilationDims>(); }
-    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<UnfoldAttr::KernelDims>(); }
+    inline std::array<DimSize_t, DIM>& strideDims() const { return mAttributes->template getAttr<mAttr::StrideDims>(); }
+    inline std::array<DimSize_t, DIM>& dilationDims() const { return mAttributes->template getAttr<mAttr::DilationDims>(); }
+    inline std::array<DimSize_t, DIM>& kernelDims() const { return mAttributes->template getAttr<mAttr::KernelDims>(); }
 
     static const std::vector<std::string> getInputsName(){
         return {"data_input"};
@@ -107,13 +110,15 @@ inline std::shared_ptr<Node> Unfold(
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by Unfold, not supported");
     return Unfold(to_array(kernelDims), name, strideDims, dilationDims);
 }
-}  // namespace Aidge
+
+} // namespace Operator
+} // namespace Aidge
 
-extern template class Aidge::Unfold_Op<2>;
+extern template class Aidge::Operator::Unfold_Op<2>;
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::UnfoldAttr>::data[] = {
+const char *const EnumStrings<Aidge::Operator::Unfold_Op<2>::mAttr>::data[] = {
     "stride_dims",
     "dilation_dims",
     "kernel_dims"
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index c0710540576959b62bbdf235ff6ea15f9d18cacd..2756581f9b648f95038f2101a57daee450bdc504 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -26,6 +26,8 @@
 #include "aidge/utils/Types.h"
 
 namespace Aidge {
+namespace Operator {
+
 /**
  * @brief implementation of the operator unsqueeze.
  * @note Since this operator implementation is agnostic to the backend it is
@@ -33,20 +35,11 @@ namespace Aidge {
  */
 class Unsqueeze_OpImpl : public OperatorImpl {
 public:
-  Unsqueeze_OpImpl(const Operator &op, const std::string &backend = "")
+  Unsqueeze_OpImpl(const AbsOperator &op, const std::string &backend = "")
       : OperatorImpl(op, backend) {}
   void forward() override;
 };
 
-enum class UnsqueezeAttr {
-  /**
-   * @brief vector of axes to unsqueeze.
-   * values must be comprised within
-   * [ -a ; a-1 ]
-   * with a = input_tensor.nbDim() + dims_to_unsqueeze.size()
-   */
-  Axes
-};
 
 /**
  * @brief This operator has as purpose to add a dummy dimension around given
@@ -61,14 +54,24 @@ class Unsqueeze_Op
     : public OperatorTensor,
       public Registrable<Unsqueeze_Op, std::string,
                          std::function<std::shared_ptr<OperatorImpl>(const Unsqueeze_Op &)>> {
+public:
+enum class mAttr {
+  /**
+   * @brief vector of axes to unsqueeze.
+   * values must be comprised within
+   * [ -a ; a-1 ]
+   * with a = input_tensor.nbDim() + dims_to_unsqueeze.size()
+   */
+  Axes
+};
 
 public:
   static const std::string
       Type; // name of the type of the operation (Here "Unsqueeze")
 
 private:
-  using Attributes_ = StaticAttributes<UnsqueezeAttr, std::vector<int8_t>>;
-  template <UnsqueezeAttr e>
+  using Attributes_ = StaticAttributes<mAttr, std::vector<int8_t>>;
+  template <mAttr e>
   using attr = typename Attributes_::template attr<e>;
   const std::shared_ptr<Attributes_> mAttributes;
 
@@ -84,7 +87,7 @@ public:
       : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData},
                        1),
         mAttributes(
-            std::make_shared<Attributes_>(attr<UnsqueezeAttr::Axes>(axes))) {
+            std::make_shared<Attributes_>(attr<mAttr::Axes>(axes))) {
     mImpl = std::make_shared<Unsqueeze_OpImpl>(*this);
   }
 
@@ -107,7 +110,7 @@ public:
    * @brief Clone the operator using its copy-constructor.
    * @see Operator::MatMul_Op
    */
-  std::shared_ptr<Operator> clone() const override final {
+  std::shared_ptr<AbsOperator> clone() const override final {
     return std::make_shared<Unsqueeze_Op>(*this);
   }
 
@@ -131,7 +134,7 @@ public:
    * with : a = input_tensor.nbDim() + dims_to_unsqueeze.size()
    */
   inline std::vector<int8_t> &axes() const noexcept {
-    return mAttributes->template getAttr<UnsqueezeAttr::Axes>();
+    return mAttributes->template getAttr<mAttr::Axes>();
   }
 
   static const std::vector<std::string> getInputsName() {
@@ -148,11 +151,13 @@ inline std::shared_ptr<Node> Unsqueeze(const std::vector<int8_t> &axes = {},
                                        const std::string &name = "") {
   return std::make_shared<Node>(std::make_shared<Unsqueeze_Op>(axes), name);
 }
+
+} // namespace Operator
 } // namespace Aidge
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::UnsqueezeAttr>::data[] = {"Axes"};
+const char *const EnumStrings<Aidge::Operator::Unsqueeze_Op::mAttr>::data[] = {"Axes"};
 }
 
 #endif // AIDGE_CORE_OPERATOR_UNSQUEEZE_H_
diff --git a/include/aidge/recipes/Recipes.hpp b/include/aidge/recipes/Recipes.hpp
index a9b9213e914811ccff7d1e6d8efe4fdd8a505b87..5b0f8c6bdd400379dbbaffc3ee7ee456b6abe4ab 100644
--- a/include/aidge/recipes/Recipes.hpp
+++ b/include/aidge/recipes/Recipes.hpp
@@ -152,7 +152,7 @@ size_t convToMatMul(std::shared_ptr<GraphView> graph);
 
 /**
  * @brief Adapt a graph to the available kernels of a backend.
- * 
+ *
  * @param graph Graph to manipulate
  */
 void adaptToBackend(std::shared_ptr<GraphView> graph);
diff --git a/include/aidge/scheduler/MemoryManager.hpp b/include/aidge/scheduler/MemoryManager.hpp
index 2e397d1dbaa1cc8d8f586d15363cbd2245963152..c3b11e19ecffc4cfbd9af8bdbb8a8d783193e589 100644
--- a/include/aidge/scheduler/MemoryManager.hpp
+++ b/include/aidge/scheduler/MemoryManager.hpp
@@ -20,21 +20,21 @@
 
 namespace Aidge {
 /**
- * @brief The MemoryManager can be used to generate an optimized static memory 
+ * @brief The MemoryManager can be used to generate an optimized static memory
  * layout for a computing graph in a global memory space.
  * The are some assumptions:
- * - A MemoryManager represents a single global memory space, filled with 
+ * - A MemoryManager represents a single global memory space, filled with
  *   contiguous, non-overlapping MemorySpace chunks.
  * - A MemorySpace contains one or multiple MemoryPlane, each MemoryPlane
  *   corresponding to the allocation of a specific Tensor. When a Tensor can re-
  *   use the memory of the preceding one (for in-place or partially in-place
- *   operators), multiple overlapping MemoryPlane can be created in the same 
+ *   operators), multiple overlapping MemoryPlane can be created in the same
  *   MemorySpace (remember, MemorySpace **cannot** be overlapping!).
  * - A MemoryPlane is tailored for handling (N)HWC data with two properties:
  *   - Possibility of wrapping: on the H axis (each W*C block is contiguous).
  *   - Possibility of concatenation: on the C axis (C1+C2+...+Cn).
  * - All the sizes and offets specified in a MemoryManager are expressed in
- *   number of data elements, or **words**, meaning currently a uniform data 
+ *   number of data elements, or **words**, meaning currently a uniform data
  *   precision is expected in a MemoryManager (for instance, if the precision is
  *   16-bits, each data element will be 2 bytes, which will be the size of a word).
  */
@@ -79,15 +79,15 @@ public:
      * MemoryPlane can be non-contiguous (in case of stride, or wrapping, when
      * offset + size > memSpace.size).
      * MemoryPlane cannot be re-arranged inside a MemorySpace.
-     * 
+     *
      * A MemoryPlane is tailored for handling (N)HWC data with two properties:
      * - Possibility of wrapping: on the H axis (each W*C block is contiguous).
      * - Possibility of concatenation: on the C axis (C1+C2+...+Cn).
-     * 
+     *
      * Detail of (N)HWC data handling:
      * - \p length is the size of contiguous and non-breakable memory line (W in HWC);
      * - \p count is the number of memory lines of size \p length constituting a memory block (H in HWC);
-     * - \p stride is the number of channels, or memory blocks, *in total*, 
+     * - \p stride is the number of channels, or memory blocks, *in total*,
      *   of \p count lines of size \p length (C in NHWC);
      * - \p size is the number of channels, or memory blocks, *in this MemoryPlane*,
      *   of \p count lines of size \p length.
@@ -98,7 +98,7 @@ public:
      *                    (with an additionnal relative offset of +C1)
      * In this mode, wrapping can only occur on the H (\p count) axis. W*C chunks
      * are garanteed to be contiguous (\p length * \p stride).
-     * 
+     *
      * By default, \p stride = \p size, \p count = 1 and \p length = 1, meaning
      * there is no NHWC layout and the MemoryPlane can be wrapped **anywhere**.
      * In this case, \p size is the total size of the MemoryPlane (H*W*C, in words).
@@ -140,7 +140,7 @@ public:
 
         /**
          * @brief Get the total size of the MemoryPlane, including the stride.
-         * 
+         *
          * @return unsigned int Total size in words
          */
         inline unsigned int getSize() const {
@@ -150,7 +150,7 @@ public:
         /**
          * @brief Get the useful size of the MemoryPlane, as if its memory blocks
          * were contiguous, without stride.
-         * 
+         *
          * @return unsigned int Useful size in words
          */
         inline unsigned int getUsefulSize() const {
@@ -159,7 +159,7 @@ public:
 
         /**
          * @brief Get the absolute offset of the beginning of the memory plane.
-         * 
+         *
          * @return unsigned int Contiguous offset in words
          */
         inline unsigned int getContiguousOffset() const {
@@ -171,7 +171,7 @@ public:
          * its beginning to the limit of the MemorySpace size.
          * If the MemoryPlane fill the MemorySpace without wrapping, the contiguous
          * size will be the same as the total size of the MemoryPlane.
-         * 
+         *
          * @return unsigned int Contiguous size in words
          */
         inline unsigned int getContiguousSize() const {
@@ -183,7 +183,7 @@ public:
          * Since the wrapped part of the memory plane begins at the beginning of
          * the MemorySpace, the returned offset is always the same as the MemorySpace
          * offset.
-         * 
+         *
          * @return unsigned int Wrapped offset in words
          */
         inline unsigned int getWrappedOffset() const {
@@ -196,7 +196,7 @@ public:
          * including the stride.
          * If the MemoryPlane fill the MemorySpace without wrapping, the wrapped
          * size will 0.
-         * 
+         *
          * @return unsigned int Wrapped size in words
          */
         inline unsigned int getWrappedSize() const {
@@ -207,7 +207,7 @@ public:
          * @brief Get the absolute offset after the end of the memory plane (if it
          * is wrapped, the offset will correspond to the end of the wrapped part).
          * The word at the final offset is not included in the MemoryPlane.
-         * 
+         *
          * @return unsigned int Final offset in words
          */
         inline unsigned int getFinalOffset() const {
@@ -220,7 +220,7 @@ public:
          * @brief Get the absolute offset after the end of the contiguous part
          * of the memory plane.
          * The word at the upper offset is not included in the MemoryPlane.
-         * 
+         *
          * @return unsigned int Upper offset in words
          */
         inline unsigned int getUpperOffset() const {
@@ -264,7 +264,7 @@ public:
         /// of \p count lines of size \p length (the C in NHWC).
         /// There should be C blocks of H*W size.
         unsigned int stride;
-        /// Size of an elementary, contiguous and non-breakable, memory line 
+        /// Size of an elementary, contiguous and non-breakable, memory line
         /// (the W in NHWC), in words. A MemoryPlane wrapping cannot occur in
         /// the middle of a memory line.
         unsigned int length;
diff --git a/include/aidge/scheduler/ProdConso.hpp b/include/aidge/scheduler/ProdConso.hpp
index a7c0ed5ae73d1f891744e835f0da5ad14a37f850..5bb37f3e8ec924939c43417e92a5ecafdc2804a2 100644
--- a/include/aidge/scheduler/ProdConso.hpp
+++ b/include/aidge/scheduler/ProdConso.hpp
@@ -19,17 +19,17 @@
 #include "aidge/data/Elts.hpp"
 
 namespace Aidge {
-class Operator;
+class AbsOperator;
 
 class ProdConso {
 public:
-    ProdConso(const Operator& op, bool inPlace = false);
+    ProdConso(const AbsOperator& op, bool inPlace = false);
 
-    static std::unique_ptr<ProdConso> defaultModel(const Operator& op) {
+    static std::unique_ptr<ProdConso> defaultModel(const AbsOperator& op) {
         return std::make_unique<ProdConso>(op, false);
     }
 
-    static std::unique_ptr<ProdConso> inPlaceModel(const Operator& op) {
+    static std::unique_ptr<ProdConso> inPlaceModel(const AbsOperator& op) {
         return std::make_unique<ProdConso>(op, true);
     }
 
@@ -79,7 +79,7 @@ public:
     virtual ~ProdConso() = default;
 
 protected:
-    const Operator &mOp;
+    const AbsOperator &mOp;
     const bool mInPlace;
     std::vector<Elts_t> mNbConsumedData;
     std::vector<Elts_t> mNbProducedData;
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index 52056852bc454f65f7d12cfc0608e5b6b0b1d933..630441b0642ef74e36a8e521ddcce5e09342cfb1 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -352,7 +352,7 @@ struct DynamicAttributes::AnyUtils<py::object> : public DynamicAttributes::AnyUt
 
     size_t hash(const future_std::any& attr) const override final {
         // Here we are mixing Python and C++ hashes... if both are
-        // well implemented, this should not increase the collision 
+        // well implemented, this should not increase the collision
         // probability for the same number of stored hashes.
         return py::hash(future_std::any_cast<py::object>(attr));
     }
diff --git a/include/aidge/utils/Random.hpp b/include/aidge/utils/Random.hpp
index 73cbd1453b3d840d6da2c58eadd5c5f47e9e9070..741eb93f488e233089913bd1bf247f155d9d6b29 100644
--- a/include/aidge/utils/Random.hpp
+++ b/include/aidge/utils/Random.hpp
@@ -15,8 +15,8 @@
 #include <algorithm>
 #include <random>
 #include <vector>
-namespace Aidge {
 
+namespace Aidge {
 namespace Random {
 
 /**
@@ -55,7 +55,7 @@ inline void randShuffle(std::vector<unsigned int>& vec) {
     std::shuffle(vec.begin(), vec.end(), Aidge::Random::Generator::get());
 }
 
-}  // namespace Random
-}  // namespace Aidge
+} // namespace Random
+} // namespace Aidge
 
 #endif  // AIDGE_RANDOM_H_
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 636863e292eeb677055dea379441ce422a6c90d8..8abb46b05f8138361a1a4fbcc66f99319870a6c3 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -317,7 +317,7 @@ private:
 
         return false;
     }
-    
+
     template<std::size_t I = 0, typename... Tp>
     inline typename std::enable_if<I == sizeof...(Tp), void>::type appendAttr(const std::tuple<Tp...>& /*t*/, std::map<std::string, future_std::any>& /*attrs*/) const {}
 
diff --git a/python_binding/backend/pybind_OperatorImpl.cpp b/python_binding/backend/pybind_OperatorImpl.cpp
index 04172c3ff68641a9fe0d14f9a326cd17e7002912..07ba7a18a26127f81367600f7c2a601f23976002 100644
--- a/python_binding/backend/pybind_OperatorImpl.cpp
+++ b/python_binding/backend/pybind_OperatorImpl.cpp
@@ -84,7 +84,7 @@ void init_OperatorImpl(py::module& m){
     ;
 
     py::class_<OperatorImpl, std::shared_ptr<OperatorImpl>, pyOperatorImpl>(m, "OperatorImpl", py::dynamic_attr())
-    .def(py::init<const Operator&, const std::string&>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>())
+    .def(py::init<const AbsOperator&, const std::string&>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>())
     .def("forward", &OperatorImpl::forward)
     .def("backward", &OperatorImpl::backward)
     .def("prod_conso", &OperatorImpl::prodConso)
diff --git a/python_binding/data/pybind_DataProvider.cpp b/python_binding/data/pybind_DataProvider.cpp
index 77abd1f39bb4d5375d2fc57c5bd5595e79f135fb..3735ce55f37e2946779fb9f738f2502df8275c49 100644
--- a/python_binding/data/pybind_DataProvider.cpp
+++ b/python_binding/data/pybind_DataProvider.cpp
@@ -31,6 +31,6 @@ void init_DataProvider(py::module& m){
         .def("__iter__", &DataProvider::iter)
         .def("__next__", &DataProvider::next)
         .def("__len__", &DataProvider::getNbBatch);
-    
+
 }
 }
diff --git a/python_binding/data/pybind_Database.cpp b/python_binding/data/pybind_Database.cpp
index 4bc28a19d350236933c3b6c139e9e3a4d980fa3f..79db4d24eb15bfdecbed9370147ec258bc8336c9 100644
--- a/python_binding/data/pybind_Database.cpp
+++ b/python_binding/data/pybind_Database.cpp
@@ -37,4 +37,4 @@ void init_Database(py::module& m) {
         .def("len", &Database::getLen)
         .def("get_nb_modalities", &Database::getNbModalities);
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/data/pybind_TensorImpl.cpp b/python_binding/data/pybind_TensorImpl.cpp
index 4c664274ec2c33174f51dad34ba1591c323b2d87..ef925a52f6b7018f4d2ccfe9bc0546c1415eeacb 100644
--- a/python_binding/data/pybind_TensorImpl.cpp
+++ b/python_binding/data/pybind_TensorImpl.cpp
@@ -29,7 +29,7 @@ void init_TensorImpl(py::module& m){
 
   py::class_<TensorImpl_cpu<double>, std::shared_ptr<TensorImpl_cpu<double>>, TensorImpl>(m, "TensorImpl_cpu_float64")
     .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
-    
+
   py::class_<TensorImpl_cpu<float>, std::shared_ptr<TensorImpl_cpu<float>>, TensorImpl>(m, "TensorImpl_cpu_float32")
     .def(py::init<DeviceIdx_t, std::vector<DimSize_t>>());
 
diff --git a/python_binding/filler/pybind_Filler.cpp b/python_binding/filler/pybind_Filler.cpp
index a85c0d6cd6fa0367dfc26328d214c99a4288a3be..1f20689146d4e800de0f0c485912ac6fc241229e 100644
--- a/python_binding/filler/pybind_Filler.cpp
+++ b/python_binding/filler/pybind_Filler.cpp
@@ -144,4 +144,4 @@ void init_Filler(py::module &m) {
             py::arg("meanNorm") = 0.0, py::arg("scaling") = 1.0)
         ;
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index cd9b2a16f92a4e7ccd2a0f2f17e605a6b049c752..567a1e2fa9384bb501e335aec113579802234885 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -148,9 +148,9 @@ void init_GraphView(py::module& m) {
           //      })
           .def("get_ranked_nodes", &GraphView::getRankedNodes)
           .def("set_dataformat", &GraphView::setDataFormat, py::arg("dataformat"))
-          
+
             ;
 
      m.def("get_connected_graph_view", &getConnectedGraphView);
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index d8e77bb259cbcbae7940a09dc405bb8f50b5b79b..442c167d7e9f7139985528ccad59180545e696a4 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -23,7 +23,7 @@ namespace py = pybind11;
 namespace Aidge {
 void init_Node(py::module& m) {
     py::class_<Node, std::shared_ptr<Node>>(m, "Node")
-    .def(py::init<std::shared_ptr<Operator>, const std::string&>(), py::arg("op"), py::arg("name") = "")
+    .def(py::init<std::shared_ptr<AbsOperator>, const std::string&>(), py::arg("op"), py::arg("name") = "")
     .def("name", &Node::name,
     R"mydelimiter(
     Name of the Node.
@@ -36,7 +36,7 @@ void init_Node(py::module& m) {
 
     .def("get_operator", &Node::getOperator,
     R"mydelimiter(
-    Get the Operator object of the Node.
+    Get the AbsOperator object of the Node.
     )mydelimiter")
 
     .def("set_name", &Node::setName, py::arg("name"),
@@ -48,7 +48,7 @@ void init_Node(py::module& m) {
     :rtype: str
     )mydelimiter")
 
-    .def("create_unique_name", &Node::createUniqueName, py::arg("base_name"), 
+    .def("create_unique_name", &Node::createUniqueName, py::arg("base_name"),
     R"mydelimiter(
     Given a base name, generate a new name which is unique in all the GraphViews containing this node.
 
@@ -190,4 +190,4 @@ void init_Node(py::module& m) {
             return self(connectors);
         });
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_And.cpp b/python_binding/operator/pybind_And.cpp
index 08dddfc8168bb77086a3dd72aca45b110a4cbce9..56b52b26e7da37dadf70f218648a6a8fcc602306 100644
--- a/python_binding/operator/pybind_And.cpp
+++ b/python_binding/operator/pybind_And.cpp
@@ -31,4 +31,4 @@ void init_And(py::module& m) {
 			:param name : name of the node.
 		)mydelimiter");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_ArgMax.cpp b/python_binding/operator/pybind_ArgMax.cpp
index 3de54afd7a669347cc2b272cff9b87cf152be09a..4ad5cb6fb51811e99396a8d25f7778fc14d390f4 100644
--- a/python_binding/operator/pybind_ArgMax.cpp
+++ b/python_binding/operator/pybind_ArgMax.cpp
@@ -30,13 +30,13 @@ void init_ArgMax(py::module &m) {
     m, pyClassName.c_str(), py::multiple_inheritance(),
       R"mydelimiter(
 		Initialize an ArgMax operator.
-			:param axis: The axis along which to compute the max element. The accepted range is [-r, r-1], 
+			:param axis: The axis along which to compute the max element. The accepted range is [-r, r-1],
 						where r is the rank of the input tensor.
 			:type axis: int
-			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False,
 							the reduced dimensions are removed.
 			:type keepdims: bool
-			:param select_last_index: If True, selects the last index if there are multiple occurrences 
+			:param select_last_index: If True, selects the last index if there are multiple occurrences
 									of the max value. If False (default), selects the first occurrence.
 			:type select_last_index: bool
 		)mydelimiter")
@@ -57,13 +57,13 @@ void init_ArgMax(py::module &m) {
        py::arg("name") = "",
 	   R"mydelimiter(
         Initialize a node containing an ArgMax operator.
-			:param axis: The axis along which to compute the max element. The accepted range is [-r, r-1], 
+			:param axis: The axis along which to compute the max element. The accepted range is [-r, r-1],
 						where r is the rank of the input tensor.
 			:type axis: int
-			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False,
 							the reduced dimensions are removed.
 			:type keepdims: bool
-			:param select_last_index: If True, selects the last index if there are multiple occurrences 
+			:param select_last_index: If True, selects the last index if there are multiple occurrences
 									of the max value. If False (default), selects the first occurrence.
 			:type select_last_index: bool
 			:param name : name of the node.
diff --git a/python_binding/operator/pybind_BatchNorm.cpp b/python_binding/operator/pybind_BatchNorm.cpp
index 9a1bdacd169beebc843448d23bdaf8502de437b4..c449fcd585e4fddcb8a3805f85de22d153541e4a 100644
--- a/python_binding/operator/pybind_BatchNorm.cpp
+++ b/python_binding/operator/pybind_BatchNorm.cpp
@@ -42,4 +42,4 @@ void init_BatchNorm(py::module &m) {
     declare_BatchNormOp<2>(m);
 }
 
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_BitShift.cpp b/python_binding/operator/pybind_BitShift.cpp
index b4f6c90e54e781b011459be6e8e6e252e7347b00..43f85947a03226e638e93f265a2f2c8720630a63 100644
--- a/python_binding/operator/pybind_BitShift.cpp
+++ b/python_binding/operator/pybind_BitShift.cpp
@@ -25,8 +25,8 @@ void init_BitShift(py::module &m) {
     // Binding for BitShiftOp class
     auto pyBitShiftOp = py::class_<BitShift_Op, std::shared_ptr<BitShift_Op>, OperatorTensor>(m, "BitShiftOp", py::multiple_inheritance(),R"mydelimiter(
         BitShiftOp is a tensor operator that performs bitwise shifts on tensor elements.
-        This class allows shifting tensor values either to the left or right based on the 
-        specified direction. The direction can be accessed and controlled using the 
+        This class allows shifting tensor values either to the left or right based on the
+        specified direction. The direction can be accessed and controlled using the
         BitShiftDirection enum.
         :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right)
         :type direction: BitShiftDirection
@@ -47,8 +47,8 @@ void init_BitShift(py::module &m) {
     m.def("BitShift", &BitShift, py::arg("direction") = BitShift_Op::BitShiftDirection::right, py::arg("name") = "",
         R"mydelimiter(
         BitShiftOp is a tensor operator that performs bitwise shifts on tensor elements.
-        This class allows shifting tensor values either to the left or right based on the 
-        specified direction. The direction can be accessed and controlled using the 
+        This class allows shifting tensor values either to the left or right based on the
+        specified direction. The direction can be accessed and controlled using the
         BitShiftDirection enum.
         :param direction: direction of the bit shift (BitShiftDirection.Left or BitShiftDirection.Right)
         :type direction: BitShiftDirection
diff --git a/python_binding/operator/pybind_Concat.cpp b/python_binding/operator/pybind_Concat.cpp
index 854f3783e9961bb5fd29746b88352438a43dd6e4..29f937155f47b1ef3e697c84f1b2cbd753b16d53 100644
--- a/python_binding/operator/pybind_Concat.cpp
+++ b/python_binding/operator/pybind_Concat.cpp
@@ -33,4 +33,4 @@ void init_Concat(py::module& m) {
     m.def("Concat", &Concat, py::arg("nb_inputs"), py::arg("axis"), py::arg("name") = "");
 }
 
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Div.cpp b/python_binding/operator/pybind_Div.cpp
index d2ad60725533be0b9db269ce5e022ac8560e1d91..17dcb558da83a0acc097531d33c3517f329b0dd0 100644
--- a/python_binding/operator/pybind_Div.cpp
+++ b/python_binding/operator/pybind_Div.cpp
@@ -28,4 +28,4 @@ void init_Div(py::module& m) {
     m.def("Div", &Div, py::arg("name") = "");
 }
 
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Erf.cpp b/python_binding/operator/pybind_Erf.cpp
index 6ca25f9569a53505385f37a02f3ab478a11f82a6..581adf986a9c742e0c5f749342e7e0bf9df7ffe4 100644
--- a/python_binding/operator/pybind_Erf.cpp
+++ b/python_binding/operator/pybind_Erf.cpp
@@ -29,4 +29,4 @@ void init_Erf(py::module& m) {
 
     m.def("Erf", &Erf, py::arg("name") = "");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Gather.cpp b/python_binding/operator/pybind_Gather.cpp
index 0aac0bbad69abb5faaaea3afd0183573db64b31f..70ea1cb25e890a957e24d9074eea35e24e389598 100644
--- a/python_binding/operator/pybind_Gather.cpp
+++ b/python_binding/operator/pybind_Gather.cpp
@@ -37,4 +37,4 @@ void init_Gather(py::module& m) {
     m.def("Gather", &Gather, py::arg("axis") = 0, py::arg("indices") = std::vector<std::int64_t>(), py::arg("gathered_shape") = std::vector<std::size_t>(), py::arg("name") = "");
 }
 
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_GenericOperator.cpp b/python_binding/operator/pybind_GenericOperator.cpp
index 6af8fef88e411af0a3ecbe5a771bf7af24de411a..96d00543d3510b1d5fdfa0c48955461e82a30e67 100644
--- a/python_binding/operator/pybind_GenericOperator.cpp
+++ b/python_binding/operator/pybind_GenericOperator.cpp
@@ -65,4 +65,4 @@ void init_GenericOperator(py::module& m) {
             return genericNode;
         }, py::arg("type"), py::arg("nb_data"), py::arg("nb_param"), py::arg("nb_out"), py::arg("name") = "");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Identity.cpp b/python_binding/operator/pybind_Identity.cpp
index 7599197226b2f8734c989755c6e7d3581a52974d..ac74919765c0b506d056d431e1ea186bb364ce4d 100644
--- a/python_binding/operator/pybind_Identity.cpp
+++ b/python_binding/operator/pybind_Identity.cpp
@@ -28,4 +28,4 @@ void init_Identity(py::module& m) {
     m.def("Identity", &Identity, py::arg("name") = "");
 }
 
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_LeakyReLU.cpp b/python_binding/operator/pybind_LeakyReLU.cpp
index e031d3dfb3348c5aec5bd497b40ff261528725ad..b0fa406462d3481a57b13f8c0481d1a14bcfc317 100644
--- a/python_binding/operator/pybind_LeakyReLU.cpp
+++ b/python_binding/operator/pybind_LeakyReLU.cpp
@@ -28,4 +28,4 @@ void init_LeakyReLU(py::module& m) {
     m.def("LeakyReLU", &LeakyReLU, py::arg("negative_slope") = 0.0f, py::arg("name") = "");
 }
 
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Ln.cpp b/python_binding/operator/pybind_Ln.cpp
index 50aa755821c257c174c4603404144dab4da26296..ae17f6910955c570b4011b990d679c8a970a1c2a 100755
--- a/python_binding/operator/pybind_Ln.cpp
+++ b/python_binding/operator/pybind_Ln.cpp
@@ -27,4 +27,4 @@ void init_Ln(py::module& m) {
 
     m.def("Ln", &Ln, py::arg("name") = "");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Memorize.cpp b/python_binding/operator/pybind_Memorize.cpp
index 3ac1122111aae1a9b7eb353399e46562ae51b0b1..5d50f12ebb76cbd637f74158efa9745c3942d3c7 100644
--- a/python_binding/operator/pybind_Memorize.cpp
+++ b/python_binding/operator/pybind_Memorize.cpp
@@ -30,4 +30,4 @@ void init_Memorize(py::module& m) {
     m.def("Memorize", &Memorize, py::arg("end_step"), py::arg("name") = "");
 }
 
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Mul.cpp b/python_binding/operator/pybind_Mul.cpp
index 23949b5fe3b22edf5b7105abd0de29b727740e35..9052f169ec21363ac947979cbf5801b1b1bcfb7e 100644
--- a/python_binding/operator/pybind_Mul.cpp
+++ b/python_binding/operator/pybind_Mul.cpp
@@ -27,4 +27,4 @@ void init_Mul(py::module& m) {
     declare_registrable<Mul_Op>(m, "MulOp");
     m.def("Mul", &Mul, py::arg("name") = "");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index e22f88687eff6856ce57fab6621781ffc86873b4..a999df0b7f6ce59671b69ff60977e89ed3d38189 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -34,35 +34,35 @@ void init_Operator(py::module& m){
         .value("OptionalData", InputCategory::OptionalData)
         .value("OptionalParam", InputCategory::OptionalParam);
 
-    py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
-    .def("__repr__", &Operator::repr)
-    .def("backend", &Operator::backend)
-    .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput, py::const_), py::arg("outputIdx"), py::arg("data"))
-    .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
-    .def("get_raw_output", &Operator::getRawOutput, py::arg("outputIdx"))
-    .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
-    .def("get_raw_input", &Operator::getRawInput, py::arg("inputIdx"))
-    .def("nb_inputs", &Operator::nbInputs)
-    .def("nb_outputs", &Operator::nbOutputs)
-    .def("input_category", &Operator::inputCategory, py::arg("idx"),
+    py::class_<AbsOperator, std::shared_ptr<AbsOperator>>(m, "AbsOperator")
+    .def("__repr__", &AbsOperator::repr)
+    .def("backend", &AbsOperator::backend)
+    .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&AbsOperator::setOutput, py::const_), py::arg("outputIdx"), py::arg("data"))
+    .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&AbsOperator::setInput), py::arg("inputIdx"), py::arg("data"))
+    .def("get_raw_output", &AbsOperator::getRawOutput, py::arg("outputIdx"))
+    .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&AbsOperator::setInput), py::arg("inputIdx"), py::arg("data"))
+    .def("get_raw_input", &AbsOperator::getRawInput, py::arg("inputIdx"))
+    .def("nb_inputs", &AbsOperator::nbInputs)
+    .def("nb_outputs", &AbsOperator::nbOutputs)
+    .def("input_category", &AbsOperator::inputCategory, py::arg("idx"),
     R"mydelimiter(
     Category of a specific input (Data or Param, optional or not).
     Data inputs exclude inputs expecting parameters (weights or bias).
 
     :rtype: InputCategory
     )mydelimiter")
-    .def("associate_input", &Operator::associateInput, py::arg("inputIdx"), py::arg("data"))
-    .def("set_datatype", &Operator::setDataType, py::arg("dataType"))
-    .def("set_backend", py::overload_cast<const std::string&, DeviceIdx_t>(&Operator::setBackend), py::arg("name"), py::arg("device") = 0)
-    .def("set_backend", py::overload_cast<const std::vector<std::pair<std::string, DeviceIdx_t>>&>(&Operator::setBackend), py::arg("backends"))
-    .def("forward", &Operator::forward)
-    // py::keep_alive forbide Python to garbage collect the implementation lambda as long as the Operator is not deleted !
-    .def("set_impl", &Operator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>())
-    .def("type", &Operator::type)
-    .def("get_impl", &Operator::getImpl)
-    .def_property_readonly("attr", &Operator::attributes)
-    .def("set_back_edges", &Operator::setBackEdges, py::arg("input_indexes"))
-    .def("is_back_edge", &Operator::isBackEdge, py::arg("input_index"))
+    .def("associate_input", &AbsOperator::associateInput, py::arg("inputIdx"), py::arg("data"))
+    .def("set_datatype", &AbsOperator::setDataType, py::arg("dataType"))
+    .def("set_backend", py::overload_cast<const std::string&, DeviceIdx_t>(&AbsOperator::setBackend), py::arg("name"), py::arg("device") = 0)
+    .def("set_backend", py::overload_cast<const std::vector<std::pair<std::string, DeviceIdx_t>>&>(&AbsOperator::setBackend), py::arg("backends"))
+    .def("forward", &AbsOperator::forward)
+    // py::keep_alive forbids Python from garbage collecting the implementation lambda as long as the AbsOperator is not deleted!
+    .def("set_impl", &AbsOperator::setImpl, py::arg("implementation"), py::keep_alive<1, 2>())
+    .def("type", &AbsOperator::type)
+    .def("get_impl", &AbsOperator::getImpl)
+    .def_property_readonly("attr", &AbsOperator::attributes)
+    .def("set_back_edges", &AbsOperator::setBackEdges, py::arg("input_indexes"))
+    .def("is_back_edge", &AbsOperator::isBackEdge, py::arg("input_index"))
     ;
 }
 }
diff --git a/python_binding/operator/pybind_OperatorTensor.cpp b/python_binding/operator/pybind_OperatorTensor.cpp
index 8c515e321207605c20acc9e5b02271906c9707d1..b140f89b8263aa6159ce676cbdb8bb4b79af9222 100644
--- a/python_binding/operator/pybind_OperatorTensor.cpp
+++ b/python_binding/operator/pybind_OperatorTensor.cpp
@@ -24,7 +24,7 @@
 namespace py = pybind11;
 namespace Aidge {
 void init_OperatorTensor(py::module& m){
-    py::class_<OperatorTensor, std::shared_ptr<OperatorTensor>, Operator>(m, "OperatorTensor")
+    py::class_<OperatorTensor, std::shared_ptr<OperatorTensor>, AbsOperator>(m, "OperatorTensor")
     .def("get_output", &OperatorTensor::getOutput, py::arg("outputIdx"))
     .def("get_input", &OperatorTensor::getInput, py::arg("inputIdx"))
 
diff --git a/python_binding/operator/pybind_Pop.cpp b/python_binding/operator/pybind_Pop.cpp
index 2040f642bbfc0428be48a6f7ec21fa3aed20a371..0279d44b8cc2d3edf92d315c7f955192f9a0932b 100644
--- a/python_binding/operator/pybind_Pop.cpp
+++ b/python_binding/operator/pybind_Pop.cpp
@@ -27,4 +27,4 @@ void init_Pop(py::module& m) {
 
     m.def("Pop", &Pop, py::arg("name") = "");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Pow.cpp b/python_binding/operator/pybind_Pow.cpp
index ec29e3faa7c3efbc2b2dbe23372f57c30568b769..2d75f98225dbe2c39bc6455df9e29f301a0167fd 100644
--- a/python_binding/operator/pybind_Pow.cpp
+++ b/python_binding/operator/pybind_Pow.cpp
@@ -28,4 +28,4 @@ void init_Pow(py::module& m) {
 
     m.def("Pow", &Pow, py::arg("name") = "");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_ReLU.cpp b/python_binding/operator/pybind_ReLU.cpp
index 79720845cf21103d3a9257880e8d2068673e36f0..c60bf4f001d271b6efbb7a77fee8817a7ee6d5a9 100644
--- a/python_binding/operator/pybind_ReLU.cpp
+++ b/python_binding/operator/pybind_ReLU.cpp
@@ -28,4 +28,4 @@ void init_ReLU(py::module& m) {
 
     m.def("ReLU", &ReLU, py::arg("name") = "");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_ReduceMean.cpp b/python_binding/operator/pybind_ReduceMean.cpp
index 028e45755fb10bb01602959f721cf003cb1e5136..70323c2ba909abb47c37ab56b1db52f2d918489e 100644
--- a/python_binding/operator/pybind_ReduceMean.cpp
+++ b/python_binding/operator/pybind_ReduceMean.cpp
@@ -30,13 +30,13 @@ void declare_ReduceMeanOp(py::module &m) {
     m, pyClassName.c_str(), py::multiple_inheritance(),
       R"mydelimiter(
 		Initialize a ReduceMean operator.
-			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1],
 						where r is the rank of the input tensor.
 			:type axes: List[int]
-			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False,
 							the reduced dimensions are removed.
 			:type keepdims: bool
-			:param noop_with_empty_axes: If True, the operator just copies the input, 
+			:param noop_with_empty_axes: If True, the operator just copies the input,
       if False, the operatpr reduces all the dimensions.
 			:type noop_with_empty_axes: bool
 		)mydelimiter")
@@ -60,13 +60,13 @@ void declare_ReduceMeanOp(py::module &m) {
        py::arg("name") = "",
 	   R"mydelimiter(
         Initialize a node containing a ReduceMean operator.
-			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1],
 						where r is the rank of the input tensor.
 			:type axes: List[int]
-			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False,
 							the reduced dimensions are removed.
 			:type keepdims: bool
-			:param noop_with_empty_axes: If True, the operator just copies the input, 
+			:param noop_with_empty_axes: If True, the operator just copies the input,
       if False, the operatpr reduces all the dimensions.
 			:type noop_with_empty_axes: bool
 			:param name : name of the node.
diff --git a/python_binding/operator/pybind_ReduceSum.cpp b/python_binding/operator/pybind_ReduceSum.cpp
index eaa57ef1c663a03cfd59ce02c13c3c7028b69e01..7ff6e15080afa286423642d29259b43de4587dd4 100644
--- a/python_binding/operator/pybind_ReduceSum.cpp
+++ b/python_binding/operator/pybind_ReduceSum.cpp
@@ -30,13 +30,13 @@ void init_ReduceSum(py::module &m) {
     m, pyClassName.c_str(), py::multiple_inheritance(),
       R"mydelimiter(
 		Initialize a ReduceMean operator.
-			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1],
 						where r is the rank of the input tensor.
 			:type axes: List[int]
-			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False,
 							the reduced dimensions are removed.
 			:type keepdims: bool
-			:param noop_with_empty_axes: If True, the operator just copies the input, 
+			:param noop_with_empty_axes: If True, the operator just copies the input,
       if False, the operatpr reduces all the dimensions.
 			:type noop_with_empty_axes: bool
 		)mydelimiter")
@@ -57,13 +57,13 @@ void init_ReduceSum(py::module &m) {
        py::arg("name") = "",
 	   R"mydelimiter(
         Initialize a node containing a ReduceMean operator.
-			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1], 
+			:param axes: Axes along which to do the reduction. The accepted range is [-r, r-1],
 						where r is the rank of the input tensor.
 			:type axes: List[int]
-			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False, 
+			:param keepdims: If True (default), retains the reduced dimensions with size 1. If False,
 							the reduced dimensions are removed.
 			:type keepdims: bool
-			:param noop_with_empty_axes: If True, the operator just copies the input, 
+			:param noop_with_empty_axes: If True, the operator just copies the input,
       if False, the operatpr reduces all the dimensions.
 			:type noop_with_empty_axes: bool
 			:param name : name of the node.
diff --git a/python_binding/operator/pybind_Reshape.cpp b/python_binding/operator/pybind_Reshape.cpp
index c0b0e8c30ef127d5cdcaf24ded75b83f06c86588..9c05a29fd069a3ae7f497061a4650f4cd74f21d9 100644
--- a/python_binding/operator/pybind_Reshape.cpp
+++ b/python_binding/operator/pybind_Reshape.cpp
@@ -27,4 +27,4 @@ void init_Reshape(py::module& m) {
     declare_registrable<Reshape_Op>(m, "ReshapeOp");
     m.def("Reshape", &Reshape, py::arg("shape") = std::vector<std::int64_t>(), py::arg("allowzero") = false, py::arg("name") = "");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Resize.cpp b/python_binding/operator/pybind_Resize.cpp
index 35321f525e486107af3715ce1c09f48b7c5cd60f..755ec6078d60417599e7e84048d548386a7a204c 100644
--- a/python_binding/operator/pybind_Resize.cpp
+++ b/python_binding/operator/pybind_Resize.cpp
@@ -27,4 +27,4 @@ void init_Resize(py::module& m) {
 
     m.def("Resize", &Resize, py::arg("name") = "");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Round.cpp b/python_binding/operator/pybind_Round.cpp
index e9ed0e473eaa820537590633a89ca47382d36672..743dc2fcbc15d40fef54d09cfe7aabcb96f0b1b3 100644
--- a/python_binding/operator/pybind_Round.cpp
+++ b/python_binding/operator/pybind_Round.cpp
@@ -26,11 +26,11 @@ void init_Round(py::module& m) {
     declare_registrable<Round_Op>(m, "RoundOp");
     m.def("Round", &Round, py::arg("name") = "", R"mydelimiter(
     RoundOp is a tensor operator that rounds the values of a tensor element-wise.
-        This class rounds each value to the nearest integer. In the case of halves, 
+        This class rounds each value to the nearest integer. In the case of halves,
         the rule is to round them to the nearest even integer.
         :param X: input tensor.
         :type X: tensor of type float, double, float16, or bfloat16.
         :param Y: output tensor with the same shape and type as the input tensor.
     )mydelimiter");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Scaling.cpp b/python_binding/operator/pybind_Scaling.cpp
index 22e8011a9cd37f80a0678f2629809d4412ba6fd2..25c5560c2f19e66ec83ff3d759f74623d30df671 100644
--- a/python_binding/operator/pybind_Scaling.cpp
+++ b/python_binding/operator/pybind_Scaling.cpp
@@ -30,4 +30,4 @@ void init_Scaling(py::module& m)
     m.def("Scaling", &Scaling, py::arg("scaling_factor") = 1.0f, py::arg("nb_bits") = 8, py::arg("is_output_unsigned") = true, py::arg("name") = "");
 }
 
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Shape.cpp b/python_binding/operator/pybind_Shape.cpp
index b3511f31eeab7d5df679d16c3bfb89f51d75cdbe..c00d7f9167c281f9458634ab85576847dc126564 100644
--- a/python_binding/operator/pybind_Shape.cpp
+++ b/python_binding/operator/pybind_Shape.cpp
@@ -34,4 +34,4 @@ void init_Shape(py::module& m) {
     m.def("Shape", &Shape, py::arg("start") = 0, py::arg("end") = -1, py::arg("name") = "");
 }
 
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Sigmoid.cpp b/python_binding/operator/pybind_Sigmoid.cpp
index db7fc7bfb60ff8360933e5f84ab54d4cec8df724..b2053234a166e9809dc87bc169ec9e3fe77510dc 100644
--- a/python_binding/operator/pybind_Sigmoid.cpp
+++ b/python_binding/operator/pybind_Sigmoid.cpp
@@ -27,4 +27,4 @@ void init_Sigmoid(py::module& m) {
 
     m.def("Sigmoid", &Sigmoid, py::arg("name") = "");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Slice.cpp b/python_binding/operator/pybind_Slice.cpp
index c8cae2592b966fff7ebfde1e5905ed31d5b22455..8c52c01e197ad114dbfa0ede15c9f0a5f7e109c7 100644
--- a/python_binding/operator/pybind_Slice.cpp
+++ b/python_binding/operator/pybind_Slice.cpp
@@ -42,4 +42,4 @@ void init_Slice(py::module& m) {
           py::arg("steps") = std::vector<std::int64_t>(),
           py::arg("name") = "");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Softmax.cpp b/python_binding/operator/pybind_Softmax.cpp
index 3b98ab9dfa1590093c567a363f67d32d613651a2..c5cd6a2342d245cc2aa7aa6bf0d471a199107f4e 100644
--- a/python_binding/operator/pybind_Softmax.cpp
+++ b/python_binding/operator/pybind_Softmax.cpp
@@ -28,4 +28,4 @@ void init_Softmax(py::module& m) {
     declare_registrable<Softmax_Op>(m, "SoftmaxOp");
     m.def("Softmax", &Softmax, py::arg("axis"), py::arg("name") = "");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Split.cpp b/python_binding/operator/pybind_Split.cpp
index 9b3feda9f791e65a9c32f2bda3da4da450838b40..dd56ab78bc2a499a957ad0c1130abe2e72e63780 100644
--- a/python_binding/operator/pybind_Split.cpp
+++ b/python_binding/operator/pybind_Split.cpp
@@ -35,4 +35,4 @@ void init_Split(py::module& m) {
     m.def("Split", &Split, py::arg("nb_outputs"), py::arg("axis") = 0, py::arg("split") = std::vector<DimSize_t>(), py::arg("name") = "");
 }
 
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Sqrt.cpp b/python_binding/operator/pybind_Sqrt.cpp
index ba0c5aab02349df4c50f960bbeb7df2082aa9233..6eacacaafc039427251fb61231d950a225499ac3 100644
--- a/python_binding/operator/pybind_Sqrt.cpp
+++ b/python_binding/operator/pybind_Sqrt.cpp
@@ -26,4 +26,4 @@ void init_Sqrt(py::module& m) {
     declare_registrable<Sqrt_Op>(m, "SqrtOp");
     m.def("Sqrt", &Sqrt, py::arg("name") = "");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Squeeze.cpp b/python_binding/operator/pybind_Squeeze.cpp
index ca90fb46af40189dbe66c320ecdd237470ffa112..5826797dadfeb82b74e4e1613788acec3574c490 100644
--- a/python_binding/operator/pybind_Squeeze.cpp
+++ b/python_binding/operator/pybind_Squeeze.cpp
@@ -27,7 +27,7 @@ void init_Squeeze(py::module &m) {
       m, "SqueezeOp", py::multiple_inheritance(),
 		R"mydelimiter(
 		Initialize squeeze operator
-		:param axes :   axes to squeeze between [-r;r-1] 
+		:param axes :   axes to squeeze between [-r;r-1]
 						with r = input_tensor.nbDims()
 						& r in [-128 , 127]
 		:type axes : :py:class: List[Int]
@@ -42,7 +42,7 @@ void init_Squeeze(py::module &m) {
         py::arg("name") = "",
         R"mydelimiter(
     Initialize a node containing a squeeze operator.
-	:param axes :   axes to squeeze between [-r;r-1] 
+	:param axes :   axes to squeeze between [-r;r-1]
 					with r = input_tensor.nbDims()
 					& r in [-128 , 127]
 	:type axes : :py:class: List[Int]
diff --git a/python_binding/operator/pybind_Sub.cpp b/python_binding/operator/pybind_Sub.cpp
index 52a622f0fdf6480a375d17c9729017fca32b3092..f692b12b71d8494917f9d1152c581c2ad7329dcf 100644
--- a/python_binding/operator/pybind_Sub.cpp
+++ b/python_binding/operator/pybind_Sub.cpp
@@ -27,4 +27,4 @@ void init_Sub(py::module& m) {
     declare_registrable<Sub_Op>(m, "SubOp");
     m.def("Sub", &Sub, py::arg("name") = "");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Tanh.cpp b/python_binding/operator/pybind_Tanh.cpp
index ded15ee78951d389d614d932e4a9c22bf310b814..7c802eb7d3b50916153ba9a2389e684c93172ca7 100644
--- a/python_binding/operator/pybind_Tanh.cpp
+++ b/python_binding/operator/pybind_Tanh.cpp
@@ -27,4 +27,4 @@ void init_Tanh(py::module& m) {
 
     m.def("Tanh", &Tanh, py::arg("name") = "");
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/python_binding/operator/pybind_Unsqueeze.cpp b/python_binding/operator/pybind_Unsqueeze.cpp
index 40c179c4064f07896113732a7e3c32db5f19c060..6c63d66cb49810e3ef32c84183f2564252824193 100644
--- a/python_binding/operator/pybind_Unsqueeze.cpp
+++ b/python_binding/operator/pybind_Unsqueeze.cpp
@@ -24,7 +24,7 @@ void init_Unsqueeze(py::module &m) {
       m, "UnsqueezeOp", py::multiple_inheritance(),
       R"mydelimiter(
 		Initialize an unsqueeze operator.
-		:param axes :   axes to unsqueeze between [-r;r-1] 
+		:param axes :   axes to unsqueeze between [-r;r-1]
 						with r = input_tensor.nbDims() + len(axes)
 		:type axes : :py:class: List[Int]
 		)mydelimiter")
@@ -39,7 +39,7 @@ void init_Unsqueeze(py::module &m) {
         py::arg("name") = "",
         R"mydelimiter(
     Initialize a node containing an unsqueeze operator.
-	:param axes :   axes to unsqueeze between [-r;r-1] 
+	:param axes :   axes to unsqueeze between [-r;r-1]
 					with r = input_tensor.nbDims() + len(axes)
 	:type axes : :py:class: List[Int]
     :param name : name of the node.
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index bac071e02db82790917276c2121ff26a3c9bf514..27fe28936616a9c8745c198b65dd43c69af0392d 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -172,6 +172,6 @@ void init_Aidge(py::module& m) {
     init_Filler(m);
 }
 
-}  // namespace Aidge
+} // namespace Aidge
 
 PYBIND11_MODULE(aidge_core, m) { Aidge::init_Aidge(m); }
diff --git a/python_binding/recipes/pybind_Recipes.cpp b/python_binding/recipes/pybind_Recipes.cpp
index 6908cbd912b506a7adb7f33a02416d0173174969..5db376ba6cbcec7ad4714a9c90c044b83e580cad 100644
--- a/python_binding/recipes/pybind_Recipes.cpp
+++ b/python_binding/recipes/pybind_Recipes.cpp
@@ -113,7 +113,7 @@ void init_Recipes(py::module &m)
     )mydelimiter");
 
   m.def("fuse_to_metaops", fuseToMetaOps, py::arg("graph_view"), py::arg("query"), py::arg("type") = "", R"mydelimiter(
-    Fuse each sub-graph matching a query in a Meta Operator.
+    Fuse each sub-graph matching a query in a Meta Operator.
 
     :param graph_view: Graph view on which we want to apply the recipe
     :type graph_view: :py:class:`aidge_core.GraphView`
@@ -121,7 +121,7 @@ void init_Recipes(py::module &m)
     :type query: str
     :param type: Type name of the resulting meta operators
     :type type: str, optional
-    :return: Number of sub-graph actually fused in a Meta Operator.
+    :return: Number of sub-graphs actually fused in a Meta Operator.
     :rtype: int
     )mydelimiter");
 
diff --git a/python_binding/scheduler/pybind_MemoryManager.cpp b/python_binding/scheduler/pybind_MemoryManager.cpp
index 0f18db405bec0aee9637f2e5f2ecc7b71e502cc5..9e5bd66013d89f2deff4d402ba7ab79c8e48efc2 100644
--- a/python_binding/scheduler/pybind_MemoryManager.cpp
+++ b/python_binding/scheduler/pybind_MemoryManager.cpp
@@ -36,10 +36,10 @@ void init_MemoryManager(py::module& m)
         .def_readwrite("released", &MemoryManager::MemorySpace::released);
 
     py::class_<MemoryManager::MemoryPlane, std::shared_ptr<MemoryManager::MemoryPlane>>(m, "MemoryPlane")
-        .def(py::init<std::shared_ptr<MemoryManager::MemorySpace>, 
+        .def(py::init<std::shared_ptr<MemoryManager::MemorySpace>,
                       MemoryManager::Clock_T, unsigned int, unsigned int,
                       unsigned int, unsigned int, unsigned int>(),
-                      py::arg("mem_space"), py::arg("clock"), py::arg("offset"), 
+                      py::arg("mem_space"), py::arg("clock"), py::arg("offset"),
                       py::arg("size"), py::arg("stride"), py::arg("length"), py::arg("count"))
         .def_readwrite("mem_space", &MemoryManager::MemoryPlane::memSpace)
         .def_readwrite("allocated", &MemoryManager::MemoryPlane::allocated)
diff --git a/python_binding/scheduler/pybind_ProdConso.cpp b/python_binding/scheduler/pybind_ProdConso.cpp
index abd6d5379178916b5842095d50a1de2155345b6f..863fd4a391a2b618f11fabb3fb8825d2ff003a4d 100644
--- a/python_binding/scheduler/pybind_ProdConso.cpp
+++ b/python_binding/scheduler/pybind_ProdConso.cpp
@@ -101,7 +101,7 @@ public:
 void init_ProdConso(py::module& m){
 
     py::class_<ProdConso, std::shared_ptr<ProdConso>, pyProdConso>(m, "ProdConso", py::dynamic_attr())
-    .def(py::init<const Operator&, bool>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>())
+    .def(py::init<const AbsOperator&, bool>(), py::keep_alive<1, 1>(), py::keep_alive<1, 2>(), py::keep_alive<1,3>())
     .def_static("default_model", &ProdConso::defaultModel)
     .def_static("in_place_model", &ProdConso::inPlaceModel)
     .def("get_nb_required_data", &ProdConso::getNbRequiredData)
diff --git a/python_binding/utils/pybind_Log.cpp b/python_binding/utils/pybind_Log.cpp
index ca8d1f33086fb5093c76826e5a2f53df873badf5..663611319b70970d754a8047e5e29cf25f4d19ed 100644
--- a/python_binding/utils/pybind_Log.cpp
+++ b/python_binding/utils/pybind_Log.cpp
@@ -78,13 +78,13 @@ void init_Log(py::module& m){
     .def_static("set_console_level", &Log::setConsoleLevel, py::arg("level"),
           R"mydelimiter(
           Set the minimum log level displayed in the console.
-          Available `Level`s in ascending order : 
+          Available `Level`s in ascending order :
             - Level.Debug
             - Level.Info
             - Level.Notice
             - Level.Warn
             - Level.Error
-            - Level.Fatal          
+            - Level.Fatal
 
           :param level: Log level.
           :type level: Level
@@ -100,13 +100,13 @@ void init_Log(py::module& m){
     .def_static("set_file_level", &Log::setFileLevel, py::arg("level"),
           R"mydelimiter(
           Set the minimum log level saved in the log file.
-          Available `Level`s in ascending order : 
+          Available `Level`s in ascending order :
             - Level.Debug
             - Level.Info
             - Level.Notice
             - Level.Warn
             - Level.Error
-            - Level.Fatal          
+            - Level.Fatal
 
           :param level: Log level.
           :type level: Level
diff --git a/python_binding/utils/pybind_Random.cpp b/python_binding/utils/pybind_Random.cpp
index a1956d2d1e398cdb81673e7760a92bcde46e2de6..1a112317f549aaf4c8a46d84eb1829cdc233f63a 100644
--- a/python_binding/utils/pybind_Random.cpp
+++ b/python_binding/utils/pybind_Random.cpp
@@ -21,4 +21,4 @@ void init_Random(py::module &m) {
     py::class_<Random::Generator>(mRand, "Generator")
     .def_static("set_seed", Random::Generator::setSeed);
 }
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index e2215e704e32367a7ca273b067398bc19fc3fc01..0c99ba650c03d1b77083de79774555ed57a26d07 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -33,7 +33,7 @@ Aidge::ImplSpec::ImplSpec(const std::vector<IOSpec>& i, const std::vector<IOSpec
 Aidge::ImplSpec::ImplSpec(const Aidge::ImplSpec&) = default;
 Aidge::ImplSpec::~ImplSpec() noexcept = default;
 
-Aidge::OperatorImpl::OperatorImpl(const Operator& op, const std::string& backend):
+Aidge::OperatorImpl::OperatorImpl(const AbsOperator& op, const std::string& backend):
     mOp(op),
     mBackend(backend)
 {
diff --git a/src/graph/Node.cpp b/src/graph/Node.cpp
index c19eab12ae34418386b1481702f64e4a82e9f771..e967691a1dc52a45054f8edf9881e9a840c2bbf7 100644
--- a/src/graph/Node.cpp
+++ b/src/graph/Node.cpp
@@ -19,7 +19,7 @@
 #include "aidge/operator/Producer.hpp"
 #include "aidge/utils/Types.h"
 
-Aidge::Node::Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttributes> attrs)
+Aidge::Node::Node(std::shared_ptr<AbsOperator> op, std::shared_ptr<DynamicAttributes> attrs)
     : mAttrs(attrs),
       mOperator(op),
       mParents(std::vector<std::shared_ptr<Node>>(static_cast<std::size_t>(op->nbInputs()),
@@ -38,10 +38,10 @@ Aidge::Node::Node(std::shared_ptr<Operator> op, std::shared_ptr<DynamicAttribute
     }
 }
 
-Aidge::Node::Node(std::shared_ptr<Operator> op, const DynamicAttributes& attrs)
+Aidge::Node::Node(std::shared_ptr<AbsOperator> op, const DynamicAttributes& attrs)
     : Node(op, std::make_shared<DynamicAttributes>(attrs)) {}
 
-Aidge::Node::Node(std::shared_ptr<Operator> op, const std::string& name)
+Aidge::Node::Node(std::shared_ptr<AbsOperator> op, const std::string& name)
     : Node(op, DynamicAttributes())
 {
     // ctor
@@ -415,7 +415,7 @@ Aidge::NodePtr Aidge::Node::cloneSharedOperators() const {
 }
 
 Aidge::NodePtr Aidge::Node::cloneSharedProducers() const {
-    std::shared_ptr<Operator> op =
+    std::shared_ptr<AbsOperator> op =
             (mOperator->type() == Producer_Op::Type) ? mOperator : mOperator->clone();
 
     return std::make_shared<Node>(op, mAttrs);
diff --git a/src/graphRegex/matchFsm/FsmGraph.cpp b/src/graphRegex/matchFsm/FsmGraph.cpp
index a56474e042cc44a68938b1d19e19a0c6841cb8cb..5e9b79ecb30e4c7d0f74266040b403b2aa1ecf93 100644
--- a/src/graphRegex/matchFsm/FsmGraph.cpp
+++ b/src/graphRegex/matchFsm/FsmGraph.cpp
@@ -10,7 +10,7 @@ FsmGraph::FsmGraph(const std::string query):mQuery(query){
 
 //TODO
     std::vector<std::shared_ptr<MatchSolution>> FsmGraph::test(const std::vector<NodePtr>& startNodes){
-        
+
     std::vector<std::shared_ptr<Aidge::FsmNode>> startNodesFsm = getStartNodes();
     if(startNodes.size() != startNodesFsm.size()){
          throw std::runtime_error("bad number of Start nodes");
@@ -61,7 +61,7 @@ FsmGraph::FsmGraph(const std::string query):mQuery(query){
         walks.swap(nextWalks);
         nextWalks.clear();
     }
-    
+
     MatchResult allMatch(allValidContext,getNbSubFsm(),mQuery,startNodes);
     return allMatch.getSolutions();
 
diff --git a/src/nodeTester/ConditionalInterpreter.cpp b/src/nodeTester/ConditionalInterpreter.cpp
index f40e62305334f740057f88ef21cdab749d64bd99..7d370007956a1908bcbf327526c2eb655a14cee5 100644
--- a/src/nodeTester/ConditionalInterpreter.cpp
+++ b/src/nodeTester/ConditionalInterpreter.cpp
@@ -28,16 +28,16 @@ using namespace Aidge;
 
         ConditionalParser conditionalParser = ConditionalParser(ConditionalExpressions);
         mTree = conditionalParser.parse();
-        
+
         ///lambda by default
         mLambdaRegister.insert("getType",+[](NodePtr NodeOp){return NodeOp->type();});
 
     }
-    
+
     bool ConditionalInterpreter::isLambdaRegister(const std::string &key){
         return mLambdaRegister.isLambdaRegister(key);
     }
-    
+
     const std::string& ConditionalInterpreter::getKey(){
         return mKey;
     }
@@ -48,7 +48,7 @@ using namespace Aidge;
         mResolution.clear();
         try{
             std::vector< std::shared_ptr<ConditionalData>> r =  visit({mTree},nodeOp);
-   
+
             if (mResolution.size() != 1){
                 throw std::runtime_error("Multi output interpretation output");
             }else{
@@ -179,7 +179,7 @@ using namespace Aidge;
                 }catch(const std::exception& e){
                     std::ostringstream errorMessage;
                     errorMessage << "Error in visiting AST for node "<< nodeOp->name() << "\n\t" << e.what()  << "\n";
-                    throw std::runtime_error(errorMessage.str()); 
+                    throw std::runtime_error(errorMessage.str());
                 }
             }
 
@@ -236,11 +236,11 @@ using namespace Aidge;
         if (mResolution.size() < 2){
             throw std::runtime_error("EQ need 2 arg and get :" + std::to_string(mResolution.size()));
         }
-        auto a = mResolution.back(); 
+        auto a = mResolution.back();
         mResolution.pop_back();
-        auto b = mResolution.back(); 
+        auto b = mResolution.back();
  	    mResolution.pop_back();
-     
+
 
         if (a->getType() != b->getType()){
             throw std::runtime_error("EQ Unsupported between type :" + a->getType() +" "+ b->getType());
@@ -262,7 +262,7 @@ using namespace Aidge;
            throw std::runtime_error("EQ Unknown type encountered :" + a->getType() );
         }
 
-        
+
         mResolution.push_back(data);
     }
 
@@ -271,9 +271,9 @@ using namespace Aidge;
         if (mResolution.size() < 2){
              throw std::runtime_error("NEQ need 2 arg and get :" + std::to_string(mResolution.size()));
         }
-        auto a = mResolution.back(); 
+        auto a = mResolution.back();
  	    mResolution.pop_back();
-        auto b = mResolution.back(); 
+        auto b = mResolution.back();
  	    mResolution.pop_back();
 
         if (a->getType() != b->getType()){
@@ -293,7 +293,7 @@ using namespace Aidge;
            throw std::runtime_error("NEQ Unknown type encountered :" + a->getType() );
         }
 
-        
+
         mResolution.push_back(data);
     }
 
@@ -302,9 +302,9 @@ using namespace Aidge;
         if (mResolution.size() < 2){
            throw std::runtime_error("AND need 2 arg and get :" + std::to_string(mResolution.size()));
         }
-        auto a = mResolution.back(); 
+        auto a = mResolution.back();
  	    mResolution.pop_back();
-        auto b = mResolution.back(); 
+        auto b = mResolution.back();
  	    mResolution.pop_back();
 
 
@@ -316,7 +316,7 @@ using namespace Aidge;
         data->setValue<bool>( a->getValue<bool>() && b->getValue<bool>());
 
 
-        
+
         mResolution.push_back(data);
     }
 
@@ -325,9 +325,9 @@ using namespace Aidge;
         if (mResolution.size() < 2){
              throw std::runtime_error("OR need 2 arg and get :" + std::to_string(mResolution.size()));
         }
-        auto a = mResolution.back(); 
+        auto a = mResolution.back();
  	    mResolution.pop_back();
-        auto b = mResolution.back(); 
+        auto b = mResolution.back();
  	    mResolution.pop_back();
 
 
@@ -339,7 +339,7 @@ using namespace Aidge;
         data->setValue<bool>( a->getValue<bool>() || b->getValue<bool>());
 
 
-        
+
         mResolution.push_back(data);
     }
 
@@ -348,7 +348,7 @@ using namespace Aidge;
             if (mResolution.size() < 1){
                 throw std::runtime_error("NOT need 1 arg and get :" + std::to_string(mResolution.size()));
             }
-            auto a = mResolution.back(); 
+            auto a = mResolution.back();
  	        mResolution.pop_back();
 
             if (a->getType() != typeid(bool).name()){
@@ -358,7 +358,7 @@ using namespace Aidge;
              std::shared_ptr<ConditionalData> data = std::make_shared<ConditionalData>();
             data->setValue<bool>( !a->getValue<bool>() );
 
-            
+
             mResolution.push_back(data);
 
         }
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 033c476c8a9e865fdf9d5670e295c3e4fb6101b3..86fd1ad9ca2cd7922da88bb62103337ddec74f52 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -40,7 +40,7 @@ Aidge::Add_Op::Add_Op(const Add_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Add_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Add_Op::clone() const {
     return std::make_shared<Add_Op>(*this);
 }
 
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index f8c8e5e3f32fff8306184dfdf3baa87392479ebf..46b2295d386c09879a967a626da23b55190560ec 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -40,7 +40,7 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
 }
 
 template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::AvgPooling_Op<DIM>::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::AvgPooling_Op<DIM>::clone() const {
     return std::make_shared<AvgPooling_Op<DIM>>(*this);
 }
 
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index bcf3b29c45abe2c40788fd1ec0bad87db8ee227b..263221247b23b03a60ff46b72828d41e10543e8e 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -39,7 +39,7 @@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
 }
 
 template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::BatchNorm_Op<DIM>::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::BatchNorm_Op<DIM>::clone() const {
     return std::make_shared<BatchNorm_Op<DIM>>(*this);
 }
 
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index 55efdd51d56f7db4f64880b967def661e5354af5..5448ebf584ed59f6bf0c53b1dcc792a25d5af4fd 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -43,7 +43,7 @@ Aidge::Concat_Op::Concat_Op(const Aidge::Concat_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Concat_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Concat_Op::clone() const {
     return std::make_shared<Concat_Op>(*this);
 }
 
diff --git a/src/operator/DepthToSpace.cpp b/src/operator/DepthToSpace.cpp
index 6b8d05625b99aec05be4f531460a5d25c120a5e0..23ca0ae72622593a82f25fd96c2e9163db8a67b2 100644
--- a/src/operator/DepthToSpace.cpp
+++ b/src/operator/DepthToSpace.cpp
@@ -79,7 +79,7 @@ Aidge::DepthToSpace_Op::DepthToSpace_Op(const Aidge::DepthToSpace_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::DepthToSpace_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::DepthToSpace_Op::clone() const {
     return std::make_shared<DepthToSpace_Op>(*this);
 }
 
diff --git a/src/operator/Erf.cpp b/src/operator/Erf.cpp
index bd5f76f8aa7c0889311e4f922fec8d20168e24b5..e7196c16e31bdf5ff01736d5279a01ba19fd17d4 100644
--- a/src/operator/Erf.cpp
+++ b/src/operator/Erf.cpp
@@ -29,7 +29,7 @@ Aidge::Erf_Op::Erf_Op(const Aidge::Erf_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Erf_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Erf_Op::clone() const {
     return std::make_shared<Erf_Op>(*this);
 }
 
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index dd3ed7aba65cf1875d691d9bc2c8c94bb03856c7..0c171a9ea8959650a31cd42b24d680c524800ff0 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -23,7 +23,7 @@
 
 const std::string Aidge::FC_Op::Type = "FC";
 
-std::shared_ptr<Aidge::Operator> Aidge::FC_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::FC_Op::clone() const {
     return std::make_shared<FC_Op>(*this);
 }
 
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
index 99ccb7505cd959178e4bd7132e32552ea5a72ecf..e97c0c5c3ad359631c2e3691e985f04824983d94 100644
--- a/src/operator/Fold.cpp
+++ b/src/operator/Fold.cpp
@@ -40,7 +40,7 @@ Aidge::Fold_Op<DIM>::Fold_Op(const Aidge::Fold_Op<DIM> &op)
 }
 
 template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::Fold_Op<DIM>::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Fold_Op<DIM>::clone() const {
     return std::make_shared<Fold_Op<DIM>>(*this);
 }
 
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 0ebc3e3bc81b15d9414d01f12a2768be6a7ddc42..46533f06d779bb836e10fa7a516766499e91d2f1 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -46,7 +46,7 @@ Aidge::Gather_Op::Gather_Op(const Aidge::Gather_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Gather_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Gather_Op::clone() const {
     return std::make_shared<Gather_Op>(*this);
 }
 
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index 0f90a5a58dbd8d69b847022c336075ecc3f3d553..a65da81dd13bb5c42876173860eb8b1493ed9464 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -51,7 +51,7 @@ Aidge::GenericOperator_Op::GenericOperator_Op(const Aidge::GenericOperator_Op& o
 
 Aidge::GenericOperator_Op::~GenericOperator_Op() noexcept = default;
 
-std::shared_ptr<Aidge::Operator> Aidge::GenericOperator_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::GenericOperator_Op::clone() const {
     return std::make_shared<GenericOperator_Op>(*this);
 }
 
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index bbcfd0d28ca039318647d206af876727793e1bfc..5f091c6299585245e34c6ecb074d6e83aebb92b7 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -31,7 +31,7 @@ Aidge::GlobalAveragePooling_Op::GlobalAveragePooling_Op(const Aidge::GlobalAvera
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::GlobalAveragePooling_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::GlobalAveragePooling_Op::clone() const {
     return std::make_shared<GlobalAveragePooling_Op>(*this);
 }
 
diff --git a/src/operator/GridSample.cpp b/src/operator/GridSample.cpp
index d26679f8337390879c8f4c4d10deb883fb40e6da..bc7d60ec05634f9ab0f92cf1b39f8a0d66d8d99a 100644
--- a/src/operator/GridSample.cpp
+++ b/src/operator/GridSample.cpp
@@ -54,7 +54,7 @@ Aidge::GridSample_Op::GridSample_Op(const Aidge::GridSample_Op& other)
 Aidge::GridSample_Op::~GridSample_Op() noexcept = default;
 
 
-std::shared_ptr<Aidge::Operator> Aidge::GridSample_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::GridSample_Op::clone() const {
     return std::make_shared<GridSample_Op>(*this);
 }
 
diff --git a/src/operator/Identity.cpp b/src/operator/Identity.cpp
index f0b8720bc1e22d8d6308460eabe436db8a4c9f6d..677c50ecc9b8d9f2eb1ea408ac52f0dab81a2741 100644
--- a/src/operator/Identity.cpp
+++ b/src/operator/Identity.cpp
@@ -34,7 +34,7 @@ Aidge::Identity_Op::Identity_Op(const Aidge::Identity_Op& op)
     mImpl = std::make_shared<Identity_OpImpl>(*this, op.backend());
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Identity_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Identity_Op::clone() const {
     return std::make_shared<Identity_Op>(*this);
 }
 
diff --git a/src/operator/LeakyReLU.cpp b/src/operator/LeakyReLU.cpp
index dea73f3101887c5213a02b029d344a34f74ba4af..37c4948be547cbbf8b11f312e86167cc2a00e50b 100644
--- a/src/operator/LeakyReLU.cpp
+++ b/src/operator/LeakyReLU.cpp
@@ -29,7 +29,7 @@ Aidge::LeakyReLU_Op::LeakyReLU_Op(const Aidge::LeakyReLU_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::LeakyReLU_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::LeakyReLU_Op::clone() const {
     return std::make_shared<LeakyReLU_Op>(*this);
 }
 
diff --git a/src/operator/Ln.cpp b/src/operator/Ln.cpp
index 90ae8d8c7dac464665828248c923a1f278dad79b..dff7f28994232e1b1a77a29e41811c85ea887570 100755
--- a/src/operator/Ln.cpp
+++ b/src/operator/Ln.cpp
@@ -29,7 +29,7 @@ Aidge::Ln_Op::Ln_Op(const Aidge::Ln_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Ln_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Ln_Op::clone() const {
     return std::make_shared<Ln_Op>(*this);
 }
 
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 668ffd04b7acb0e72b4a3313805fa89ca3466f32..1f830bb828b9e6e081bbf82803a7f294e59562b9 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -30,7 +30,7 @@ Aidge::MatMul_Op::MatMul_Op(const Aidge::MatMul_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::MatMul_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::MatMul_Op::clone() const {
     return std::make_shared<MatMul_Op>(*this);
 }
 
diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp
index 5ce137fe6b6c0e4b7150bfc0f1182f6f8ee94850..cb72f70f8d2a678ec408b2324099b2f52bf278b2 100644
--- a/src/operator/MaxPooling.cpp
+++ b/src/operator/MaxPooling.cpp
@@ -46,7 +46,7 @@ Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const Aidge::MaxPooling_Op<DIM>& op)
 }
 
 template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::MaxPooling_Op<DIM>::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::MaxPooling_Op<DIM>::clone() const {
     return std::make_shared<MaxPooling_Op<DIM>>(*this);
 }
 
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index 61239071a99a9dfca8613ef78eba17757c4276b7..344811cc1597da10bcbdd230a987b553cd32c117 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -99,7 +99,7 @@ Aidge::Memorize_Op::Memorize_Op(const Aidge::Memorize_Op& op)
     mOutputs[1] = mOutputs[0];
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Memorize_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Memorize_Op::clone() const {
     return std::make_shared<Memorize_Op>(*this);
 }
 
diff --git a/src/operator/MetaOperator.cpp b/src/operator/MetaOperator.cpp
index ab6bde74fb73011f7b49e6958d8cfa8320d0bc1b..19f5d8208d49b8127480eb4320e6235b45a34fbb 100644
--- a/src/operator/MetaOperator.cpp
+++ b/src/operator/MetaOperator.cpp
@@ -50,7 +50,7 @@ Aidge::MetaOperator_Op::MetaOperator_Op(const std::string& type, const std::shar
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::MetaOperator_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::MetaOperator_Op::clone() const {
     return std::make_shared<MetaOperator_Op>(type(), mGraph->clone());
 }
 
diff --git a/src/operator/Move.cpp b/src/operator/Move.cpp
index adabcd0d359927693965cec1987d2fad083328b9..14b1c0c8148441c5f5c56b82b77b0df48dfb64ac 100644
--- a/src/operator/Move.cpp
+++ b/src/operator/Move.cpp
@@ -36,7 +36,7 @@ Aidge::Move_Op::Move_Op(const Aidge::Move_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Move_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Move_Op::clone() const {
     return std::make_shared<Move_Op>(*this);
 }
 
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index 3f163c9d6a572cc488c621a0ec6819ea68143304..f93c9475b29a2110d59e1f78d7e176ebd06c492a 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -33,7 +33,7 @@ Aidge::Mul_Op::Mul_Op(const Aidge::Mul_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Mul_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Mul_Op::clone() const {
     return std::make_shared<Mul_Op>(*this);
 }
 
diff --git a/src/operator/Operator.cpp b/src/operator/Operator.cpp
index bd09e9d1297ec612b08634f59bfe33f0802ef3fd..104dac6217417c936f2692dd8659b13aa3405ef0 100644
--- a/src/operator/Operator.cpp
+++ b/src/operator/Operator.cpp
@@ -20,62 +20,62 @@
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
-// constexpr Aidge::Operator::Operator(const char* type)
+// constexpr Aidge::AbsOperator::AbsOperator(const char* type)
 //     : mType(type)
 // {
 // 	// ctor
 // }
 
-Aidge::Operator::~Operator() noexcept = default;
+Aidge::AbsOperator::~AbsOperator() noexcept = default;
 
 ///////////////////////////////////////////////////////
 //        IMPLEMENTATION
 ///////////////////////////////////////////////////////
 
-Aidge::Elts_t Aidge::Operator::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
+Aidge::Elts_t Aidge::AbsOperator::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbRequiredData(): an implementation is required for {}!", type());
     return mImpl->prodConso()->getNbRequiredData(inputIdx);
 }
 
-Aidge::Elts_t Aidge::Operator::getNbRequiredProtected(const Aidge::IOIndex_t inputIdx) const {
+Aidge::Elts_t Aidge::AbsOperator::getNbRequiredProtected(const Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbRequiredProtected(): an implementation is required for {}!", type());
     return mImpl->prodConso()->getNbRequiredProtected(inputIdx);
 }
 
-Aidge::Elts_t Aidge::Operator::getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const {
+Aidge::Elts_t Aidge::AbsOperator::getRequiredMemory(const IOIndex_t outputIdx, const std::vector<DimSize_t> &inputsSize) const {
     AIDGE_ASSERT(mImpl != nullptr, "getRequiredMemory(): an implementation is required for {}!", type());
     return mImpl->prodConso()->getRequiredMemory(outputIdx, inputsSize);
 }
 
-Aidge::Elts_t Aidge::Operator::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
+Aidge::Elts_t Aidge::AbsOperator::getNbConsumedData(Aidge::IOIndex_t inputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbConsumedData(): an implementation is required for {}!", type());
     return mImpl->prodConso()->getNbConsumedData(inputIdx);
 }
 
-Aidge::Elts_t Aidge::Operator::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
+Aidge::Elts_t Aidge::AbsOperator::getNbProducedData(Aidge::IOIndex_t outputIdx) const {
     AIDGE_ASSERT(mImpl != nullptr, "getNbProducedData(): an implementation is required for {}!", type());
     return mImpl->prodConso()->getNbProducedData(outputIdx);
 }
-void Aidge::Operator::updateConsummerProducer(){
+void Aidge::AbsOperator::updateConsummerProducer(){
     AIDGE_ASSERT(mImpl != nullptr, "updateConsummerProducer(): an implementation is required for {}!", type());
     mImpl->prodConso()->updateConsummerProducer();
 }
-void Aidge::Operator::resetConsummerProducer(){
+void Aidge::AbsOperator::resetConsummerProducer(){
     AIDGE_ASSERT(mImpl != nullptr, "resetConsummerProducer(): an implementation is required for {}!", type());
     mImpl->prodConso()->resetConsummerProducer();
 }
 
-void Aidge::Operator::forward() {
+void Aidge::AbsOperator::forward() {
     AIDGE_ASSERT(mImpl != nullptr, "forward(): an implementation is required for {}!", type());
     mImpl->forward();
 }
 
-void Aidge::Operator::backward() {
+void Aidge::AbsOperator::backward() {
     AIDGE_ASSERT(mImpl != nullptr, "backward(): an implementation is required for {}!", type());
     mImpl->backward();
 }
 
-void Aidge::Operator::setBackend(const std::vector<std::pair<std::string, DeviceIdx_t>>& backends) {
+void Aidge::AbsOperator::setBackend(const std::vector<std::pair<std::string, DeviceIdx_t>>& backends) {
     const auto& availableBackends = getAvailableBackends();
     // By default, try to set the last backend anyway
     auto selectedBackend = backends.back();
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index ff6fb9ce4b6b8596477dfdd1f43f8927e534459b..d5f0794538c2a1e1a657546e8e1f2e7842aca2a0 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -21,7 +21,7 @@
 Aidge::OperatorTensor::OperatorTensor(const std::string& type,
                                       const std::vector<InputCategory>& inputsCategory,
                                                             const IOIndex_t nbOut)
-: Operator(type, inputsCategory, nbOut, OperatorType::Tensor),
+: AbsOperator(type, inputsCategory, nbOut, OperatorType::Tensor),
         mInputs(std::vector<std::shared_ptr<Tensor>>(inputsCategory.size(), nullptr)),
         mOutputs(std::vector<std::shared_ptr<Tensor>>(nbOut)) {
     for (std::size_t i = 0; i < static_cast<std::size_t>(nbOut); ++i) {
@@ -32,7 +32,7 @@ Aidge::OperatorTensor::OperatorTensor(const std::string& type,
 
 
 Aidge::OperatorTensor::OperatorTensor(const OperatorTensor& other)
-    : Operator(other),
+    : AbsOperator(other),
         mInputs(std::vector<std::shared_ptr<Tensor>>(other.nbInputs(), nullptr)),
         mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) {
     for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) {
@@ -203,5 +203,5 @@ void Aidge::OperatorTensor::forward() {
         forwardDims(true);
     }
 
-    Operator::forward();
+    AbsOperator::forward();
 }
diff --git a/src/operator/Pad.cpp b/src/operator/Pad.cpp
index 39f61e328bd3f98bc836604462bbfc064fbb93be..505f1cf71354910b7cb5021cb5610554d1f9fe43 100644
--- a/src/operator/Pad.cpp
+++ b/src/operator/Pad.cpp
@@ -23,7 +23,7 @@ template <Aidge::DimIdx_t DIM>
 const std::string Aidge::Pad_Op<DIM>::Type = "Pad";
 
 template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::Pad_Op<DIM>::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Pad_Op<DIM>::clone() const {
     return std::make_shared<Pad_Op<DIM>>(*this);
 }
 
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index cd5b18759cdd743f292054bca91ffee5da722ea6..9278a056a2b8fbdc26ef2e27dfd83d70221c605f 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -58,7 +58,7 @@ Aidge::Pop_Op::Pop_Op(const Aidge::Pop_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Pop_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Pop_Op::clone() const {
     return std::make_shared<Pop_Op>(*this);
 }
 
@@ -74,7 +74,7 @@ bool Aidge::Pop_Op::forwardDims(bool /*allowDataDependency*/) {
 }
 
 void Aidge::Pop_Op::updateConsummerProducer() {
-    Operator::updateConsummerProducer();
+    AbsOperator::updateConsummerProducer();
     mAttributes->template getAttr<PopAttr::ForwardStep>() = 0;
 }
 
@@ -93,7 +93,7 @@ std::set<std::string> Aidge::Pop_Op::getAvailableBackends() const {
 }
 
 void Aidge::Pop_Op::forward() {
-    Operator::forward();
+    AbsOperator::forward();
     ++mAttributes->template getAttr<PopAttr::ForwardStep>();
 }
 
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 3d48b88ab400596d68cbfa34502e795766ff94f0..7342447e2cbd93eda77291d221ff20ec98204214 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -70,7 +70,7 @@ Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Producer_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Producer_Op::clone() const {
     return std::make_shared<Producer_Op>(*this);
 }
 
diff --git a/src/operator/ReLU.cpp b/src/operator/ReLU.cpp
index bda26fa3332ee914325820f47d0babcb622905c8..efe418158c0129538d2df2e4ddd798c8a2cd35d0 100644
--- a/src/operator/ReLU.cpp
+++ b/src/operator/ReLU.cpp
@@ -29,7 +29,7 @@ Aidge::ReLU_Op::ReLU_Op(const Aidge::ReLU_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::ReLU_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::ReLU_Op::clone() const {
     return std::make_shared<ReLU_Op>(*this);
 }
 
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index 7935edb050824af92a8f130f975aa09e41ca875f..13896df57a36f7b4eef4585279188dd81561b768 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -46,7 +46,7 @@ Aidge::ReduceMean_Op::ReduceMean_Op(const Aidge::ReduceMean_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::ReduceMean_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::ReduceMean_Op::clone() const {
     return std::make_shared<ReduceMean_Op>(*this);
 }
 
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 0fa9a62816a36ad3afece02052224c966ee121a3..070ca5c77735f21090a77a43a6e1591b0bdab8c4 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -53,7 +53,7 @@ Aidge::Reshape_Op::Reshape_Op(const Aidge::Reshape_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Reshape_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Reshape_Op::clone() const {
     return std::make_shared<Reshape_Op>(*this);
 }
 
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index 9e5762452e382a31c1e5da25708507653da2e474..2d3cc6541003b4c3f79b8358cde47e9a45969d30 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -50,7 +50,7 @@ Aidge::Resize_Op::Resize_Op(const Aidge::Resize_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Resize_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Resize_Op::clone() const {
     return std::make_shared<Resize_Op>(*this);
 }
 
diff --git a/src/operator/Round.cpp b/src/operator/Round.cpp
index ba4eff9d1e1cf06cc5a4bbda54010aec8c2f2f63..8eb8fb5d2a3e37afd8b216c8919313fc487ab017 100644
--- a/src/operator/Round.cpp
+++ b/src/operator/Round.cpp
@@ -32,7 +32,7 @@ Aidge::Round_Op::Round_Op(const Aidge::Round_Op& op)
 }
 
 
-std::shared_ptr<Aidge::Operator> Aidge::Round_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Round_Op::clone() const {
     return std::make_shared<Round_Op>(*this);
 }
 
diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp
index 5ac08cd2245e0caa3ca7072c70ccc69bcfcf9558..da59f360c2e391f9e7ba89815a4a5bbeb00f74fa 100644
--- a/src/operator/Scaling.cpp
+++ b/src/operator/Scaling.cpp
@@ -39,7 +39,7 @@ Aidge::Scaling_Op::Scaling_Op(const Aidge::Scaling_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Scaling_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Scaling_Op::clone() const {
     return std::make_shared<Scaling_Op>(*this);
 }
 
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index 29a9ee6252a0c2baa6e07bc56e60650685db6bdd..37a649400059977c017c90388ee26464c7e59f21 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -55,7 +55,7 @@ Aidge::Shape_Op::Shape_Op(const Aidge::Shape_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Shape_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Shape_Op::clone() const {
     return std::make_shared<Shape_Op>(*this);
 }
 
diff --git a/src/operator/ShiftGELU.cpp b/src/operator/ShiftGELU.cpp
index bd229e6cf58a430922d08cff5301aa16ef636d5e..53d75d872ca3d6d2af14c387bb4f1494a3b1530a 100644
--- a/src/operator/ShiftGELU.cpp
+++ b/src/operator/ShiftGELU.cpp
@@ -33,7 +33,7 @@ Aidge::ShiftGELU_Op::ShiftGELU_Op(const Aidge::ShiftGELU_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::ShiftGELU_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::ShiftGELU_Op::clone() const {
     return std::make_shared<ShiftGELU_Op>(*this);
 }
 
diff --git a/src/operator/ShiftMax.cpp b/src/operator/ShiftMax.cpp
index 58d4bf46100ce116ad4a179e972cbef81bc5b5c1..ccc9fedc113cc4c4abc66fe00ca2907b7a32f41f 100644
--- a/src/operator/ShiftMax.cpp
+++ b/src/operator/ShiftMax.cpp
@@ -37,7 +37,7 @@ Aidge::ShiftMax_Op::ShiftMax_Op(const Aidge::ShiftMax_Op& op)
  * @brief Clone the operator using its copy-constructor.
  * @see Operator::ShiftMax_Op
  */
-std::shared_ptr<Aidge::Operator> Aidge::ShiftMax_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::ShiftMax_Op::clone() const {
     return std::make_shared<ShiftMax_Op>(*this);
 }
 
diff --git a/src/operator/Sigmoid.cpp b/src/operator/Sigmoid.cpp
index d97f8c52341dee4e6e0840afa6e023d8a4e3fd52..fc2c2691228c88f65ede6885a58a6f378b90d419 100644
--- a/src/operator/Sigmoid.cpp
+++ b/src/operator/Sigmoid.cpp
@@ -32,7 +32,7 @@ Aidge::Sigmoid_Op::Sigmoid_Op(const Aidge::Sigmoid_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Sigmoid_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Sigmoid_Op::clone() const {
     return std::make_shared<Sigmoid_Op>(*this);
 }
 
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 3bdee8c13c1759261140d634940b0a4e81210084..a277a5ea782ebab35e7324fd28747298e6a23fa1 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -57,7 +57,7 @@ Aidge::Slice_Op::Slice_Op(const Aidge::Slice_Op &op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Slice_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Slice_Op::clone() const {
     return std::make_shared<Slice_Op>(*this);
 }
 
diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp
index ad894c5e56a674a452d0388f88a7e4ad268dd216..96f8adc61c9df0e00bef383cfee59e32d5d57a09 100644
--- a/src/operator/Softmax.cpp
+++ b/src/operator/Softmax.cpp
@@ -37,7 +37,7 @@ Aidge::Softmax_Op::Softmax_Op(const Aidge::Softmax_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Softmax_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Softmax_Op::clone() const {
     return std::make_shared<Softmax_Op>(*this);
 }
 
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index e3ed13588d8c2b5ddde91d37fc926d675f0666a3..98146b956ea72e6fa136a022eb69ba36715c4147 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -79,7 +79,7 @@ Aidge::Split_Op::Split_Op(const Aidge::Split_Op &op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Split_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Split_Op::clone() const {
     return std::make_shared<Split_Op>(*this);
 }
 
diff --git a/src/operator/Sqrt.cpp b/src/operator/Sqrt.cpp
index bd3286f098cd5c6985d7f33f88b723523ef94765..5f81d29ff2770e951b695ab4016b66109a419a3d 100644
--- a/src/operator/Sqrt.cpp
+++ b/src/operator/Sqrt.cpp
@@ -32,7 +32,7 @@ Aidge::Sqrt_Op::Sqrt_Op(const Aidge::Sqrt_Op& op)
 }
 
 
-std::shared_ptr<Aidge::Operator> Aidge::Sqrt_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Sqrt_Op::clone() const {
     return std::make_shared<Sqrt_Op>(*this);
 }
 
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index ca7348b3b415375c09ac1cfd69ac3d6f6e3488eb..111bb1ee35425d204c17179ebd71bd10bdec5a1a 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -34,7 +34,7 @@ Aidge::Sub_Op::Sub_Op(const Aidge::Sub_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Sub_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Sub_Op::clone() const {
     return std::make_shared<Sub_Op>(*this);
 }
 
diff --git a/src/operator/Tanh.cpp b/src/operator/Tanh.cpp
index fe295ab71b67e8e62562066b1464ffba6e8ae404..9e4301c32794355be888889e0b2d3772e1eb3f9e 100644
--- a/src/operator/Tanh.cpp
+++ b/src/operator/Tanh.cpp
@@ -32,7 +32,7 @@ Aidge::Tanh_Op::Tanh_Op(const Aidge::Tanh_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Tanh_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Tanh_Op::clone() const {
     return std::make_shared<Tanh_Op>(*this);
 }
 
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 0cb1717f1c96c393b8845db129eee1429966cd98..895a8d3bb08e70d0c804c163113a4871cff49dca 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -52,7 +52,7 @@ Aidge::Transpose_Op::Transpose_Op(const Aidge::Transpose_Op& op)
     }
 }
 
-std::shared_ptr<Aidge::Operator> Aidge::Transpose_Op::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Transpose_Op::clone() const {
     return std::make_shared<Transpose_Op>(*this);
 }
 
diff --git a/src/operator/Unfold.cpp b/src/operator/Unfold.cpp
index 53b8bd5442081e601a55853115f44067ae17fc2b..3161ffa1eae0336469b6a21eb35216a5daa06eb8 100644
--- a/src/operator/Unfold.cpp
+++ b/src/operator/Unfold.cpp
@@ -99,7 +99,7 @@ Aidge::Unfold_Op<DIM>::Unfold_Op(const Aidge::Unfold_Op<DIM> &op)
 }
 
 template <Aidge::DimIdx_t DIM>
-std::shared_ptr<Aidge::Operator> Aidge::Unfold_Op<DIM>::clone() const {
+std::shared_ptr<Aidge::AbsOperator> Aidge::Unfold_Op<DIM>::clone() const {
     return std::make_shared<Unfold_Op>(*this);
 }
 
diff --git a/src/scheduler/ProdConso.cpp b/src/scheduler/ProdConso.cpp
index a3bff53c3643a5da361dec5944f47a27f148a995..8b79094fe967f1552259a11406a2fa1ebdfbaf4d 100644
--- a/src/scheduler/ProdConso.cpp
+++ b/src/scheduler/ProdConso.cpp
@@ -17,7 +17,7 @@
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/ErrorHandling.hpp"
 
-Aidge::ProdConso::ProdConso(const Operator& op, bool inPlace):
+Aidge::ProdConso::ProdConso(const AbsOperator& op, bool inPlace):
     mOp(op),
     mInPlace(inPlace),
     mNbConsumedData(mOp.nbInputs(), Elts_t::NoneElts()),
diff --git a/src/utils/Log.cpp b/src/utils/Log.cpp
index da32a8e0ec6a3c9f27da5c47f9e6166c1fc879bc..f781735bad9a5c6f3a5369ee1cefc05585060f4f 100644
--- a/src/utils/Log.cpp
+++ b/src/utils/Log.cpp
@@ -62,7 +62,7 @@ std::vector<std::string> Aidge::Log::mContext;
 void Aidge::Log::log(Level level, const std::string& msg) {
     if (level >= mConsoleLevel) {
         // Apply log level style only for console.
-        // Styles that were already applied to msg with fmt are kept also in 
+        // Styles that were already applied to msg with fmt are kept also in
         // the log file.
         const auto modifier
             = !mConsoleColor ? fmt::text_style()
diff --git a/unit_tests/graphRegex/Test_examples.cpp b/unit_tests/graphRegex/Test_examples.cpp
index d85ae5c893a7ae4497125a62dad3cde97dac5195..a0fbbfa6eabd661a33cb576436be69b4865c5133 100644
--- a/unit_tests/graphRegex/Test_examples.cpp
+++ b/unit_tests/graphRegex/Test_examples.cpp
@@ -52,4 +52,4 @@ TEST_CASE("Examples", "[GraphMatching]") {
     }
 }
 
-}  // namespace Aidge
\ No newline at end of file
+} // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/operator/Test_BitShift_Op.cpp b/unit_tests/operator/Test_BitShift_Op.cpp
index 39916e4e75779ecc63680b43ece8ccd2bdc667c9..85a456920cd6eae05260fbf50ffe16e16db2bddd 100644
--- a/unit_tests/operator/Test_BitShift_Op.cpp
+++ b/unit_tests/operator/Test_BitShift_Op.cpp
@@ -20,7 +20,7 @@
 #include "aidge/operator/OperatorTensor.hpp"
 
 namespace Aidge {
-TEST_CASE("[core/operator] BitShift_Op(forwardDims)", "[BitShift][forwardDims]") 
+TEST_CASE("[core/operator] BitShift_Op(forwardDims)", "[BitShift][forwardDims]")
 {
     constexpr std::uint16_t NBTRIALS = 10;
 
@@ -101,7 +101,7 @@ TEST_CASE("[core/operator] BitShift_Op(forwardDims)", "[BitShift][forwardDims]")
         }
     }
     SECTION("BitShifOP Test dimensions [Wrong Dimensions]") {
-        
+
        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
             const std::size_t nb_dims = nbDimsDist(gen) + 1;
             std::vector<std::size_t> dims0(nb_dims);
diff --git a/unit_tests/operator/Test_ConvDepthWise_Op.cpp b/unit_tests/operator/Test_ConvDepthWise_Op.cpp
index 6008e3bfac346725935d5d8ffe87f392c49a3409..819217b319f7740f47ca42ab5641480fdf9a7da3 100644
--- a/unit_tests/operator/Test_ConvDepthWise_Op.cpp
+++ b/unit_tests/operator/Test_ConvDepthWise_Op.cpp
@@ -70,4 +70,4 @@ TEST_CASE("[core/operator] ConvDepthWise_Op(computeReceptiveField)", "[Operator]
         REQUIRE(((res1[0].first == std::vector<DimSize_t>({5, 0, 100, 100})) && (res1[0].second == std::vector<DimSize_t>({1, 1, 8, 8}))));
     }
 }
-}  // namespace Aidge
\ No newline at end of file
+} // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/operator/Test_Conv_Op.cpp b/unit_tests/operator/Test_Conv_Op.cpp
index bc24fc8081d78dedf853450ff648b6d91b47c1dc..813892531034b2accdfb9a7ce83e68702678a732 100644
--- a/unit_tests/operator/Test_Conv_Op.cpp
+++ b/unit_tests/operator/Test_Conv_Op.cpp
@@ -83,4 +83,4 @@ TEST_CASE("[core/operator] Conv_Op(computeReceptiveField)", "[Operator][computeR
         //           << res1[0].second[3] << "}" << std::endl;
     }
 }
-}  // namespace Aidge
\ No newline at end of file
+} // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/operator/Test_Operator.cpp b/unit_tests/operator/Test_Operator.cpp
index a050bbc4021b0c70a0d8faf6478eb2bd13ebdb58..8ca53404cb78df5ade01273a56eaab8015634429 100644
--- a/unit_tests/operator/Test_Operator.cpp
+++ b/unit_tests/operator/Test_Operator.cpp
@@ -23,7 +23,7 @@
 #include "aidge/operator/Producer.hpp"
 
 namespace Aidge {
-// TEST_CASE("[core/operator] Operator(computeReceptiveField)", "[Operator][computeReceptiveFiled]") {
+// TEST_CASE("[core/operator] AbsOperator(computeReceptiveField)", "[AbsOperator][computeReceptiveFiled]") {
 //     auto dataProvider1 = Producer({16, 3, 224, 224}, "dataProvider1");
 //     auto dataProvider2 = Producer({16, 3, 224, 224}, "dataProvider2");
 //     auto gen1 = Add(2);
@@ -47,4 +47,4 @@ namespace Aidge {
 //         REQUIRE(((res2[0].first == gen2->getOperator()->input(0).getIdx({3,2,100,28})) && (res2[0].second == std::vector<DimSize_t>({1, 1, 30, 40}))));
 //     }
 // }
-}  // namespace Aidge
\ No newline at end of file
+} // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/recipes/Test_FuseToMetaOps.cpp b/unit_tests/recipes/Test_FuseToMetaOps.cpp
index 9fceedf2feef0a3ed79b83a8494a1a2b49f77291..80af0c3759110665d1d043f87bc24e188b00e950 100644
--- a/unit_tests/recipes/Test_FuseToMetaOps.cpp
+++ b/unit_tests/recipes/Test_FuseToMetaOps.cpp
@@ -41,4 +41,4 @@ TEST_CASE("[cpu/recipes] FuseToMetaOps", "[FuseToMetaOps][recipes]") {
     REQUIRE(nbFused == 2);
 }
 
-}  // namespace Aidge
+} // namespace Aidge
diff --git a/unit_tests/recipes/Test_MatMulToFC.cpp b/unit_tests/recipes/Test_MatMulToFC.cpp
index 2adf882ca69e0d5ca5f050d1b89cfb09d81b536b..ba691e10ebba467542b7a37e2cd93b80b9d5e1d9 100644
--- a/unit_tests/recipes/Test_MatMulToFC.cpp
+++ b/unit_tests/recipes/Test_MatMulToFC.cpp
@@ -115,4 +115,4 @@ TEST_CASE("[cpu/recipes] MatMulToFC", "[MatMulToFC][recipes]") {
     }
 }
 
-}  // namespace Aidge
+} // namespace Aidge