diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 9b2c6d76e470f55e91758b6a945d5ebcab7a9752..927911056c3a6772189ca17c0064f6f61e53feca 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -885,6 +885,10 @@ public:
         // First refFrom, to ensure that fallback, if required, is also on desired device
         return refFrom(fallback, backend, device).refCast(fallback, dt);
     }
+    const Tensor& refCastFrom(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device = 0) const {
+        // First refFrom, to ensure that fallback, if required, is also on desired device
+        return refFrom(fallback, backend, device).refCast(fallback, dt);
+    }
 
     /**
      * Return a reference to a Tensor with same characteristics
@@ -904,6 +908,10 @@ public:
         const auto& device = targetReqs.getImpl()->device();
         return refCastFrom(fallback, targetReqs.dataType(), device.first, device.second);
     }
+    const Tensor& refCastFrom(std::shared_ptr<Tensor>& fallback, const Tensor& targetReqs) const {
+        const auto& device = targetReqs.getImpl()->device();
+        return refCastFrom(fallback, targetReqs.dataType(), device.first, device.second);
+    }
 
     /**
      * @brief Return a reference to a Tensor on desired data type and backend/device:
@@ -941,6 +949,10 @@ public:
         const auto& device = targetReqs.getImpl()->device();
         return ref(fallback, targetReqs.dataType(), device.first, device.second);
     }
+    const Tensor& ref(std::shared_ptr<Tensor>& fallback, const Tensor& targetReqs) const {
+        const auto& device = targetReqs.getImpl()->device();
+        return ref(fallback, targetReqs.dataType(), device.first, device.second);
+    }
 
 
     /**
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 3b10d9c215cc0ea48062a5de4064a3ad45bdd7b6..1aaa82a0e19ef0a83efbe79f5b5e09efc4a2d82e 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -170,7 +170,7 @@ public:
      */
     Pad_Op(const Pad_Op& op)
         : OperatorTensor(op),
-          mAttributes(op.mAttributes) {}
+          mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {}
 
     /**
      * @brief Clone the operator using its copy-constructor.
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
index 73f59c25d43e8c78cfd9feb42eefcfd94f8680a1..9900a79d38d02590c13fa7f20353e441deeb9b78 100644
--- a/include/aidge/operator/ReduceSum.hpp
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -112,7 +112,7 @@ public:
      */
     ReduceSum_Op(const ReduceSum_Op& op)
         : OperatorTensor(op),
-          mAttributes(op.mAttributes)
+          mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
     {
         if (op.mImpl){
             SET_IMPL_MACRO(ReduceSum_Op, *this, op.backend());
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index 32ddbe48804e359a9a868a149e66c43342b76d56..6af6f85a9137570c8380fc1af89d362da99bffa7 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -157,7 +157,7 @@ public:
      * @param op Operator to copy.
      */
     Resize_Op(const Resize_Op &op)
-        : OperatorTensor(op), mAttributes(op.mAttributes) {
+        : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Resize_Op, *this, op.backend());
         } else {
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index d2d06fa5f2c2406d35b35023ace0bf42332c3f84..452468278ff305465c4be97a0678abdfca30ac35 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -258,6 +258,7 @@ private:
     static int mFloatingPointPrecision;
 };
 
+std::string htmlEscape(const std::string& data);
 } // namespace Aidge
 
 namespace {
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 9d6557054f2f584e4dc4137ea7fdf544aba8fff3..aecec559d4d794559f78f213e1c268249abe8117 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -106,14 +106,15 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
         std::string attrs;
         // Ignore name attribute (if size == 1)
         if (node_ptr->attributes()->getAttrs().size() > 1) {
+          attrs = htmlEscape(fmt::format("{}", *node_ptr->attributes()));
           attrs = fmt::format("&nbsp;<sup><span title=\"{}\" style=\"cursor: pointer; font-weight: bold; color: blue\">[{}]</span></sup>",
-            *node_ptr->attributes(), node_ptr->attributes()->getAttrs().size());
+            attrs, node_ptr->attributes()->getAttrs().size());
         }
 
         std::string givenName =
             (node_ptr->name().empty())
                 ? "\"<em>" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + "</em>" + attrs + "\""
-                : "\"" + node_ptr->name() + attrs + "<br/><sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\"";
+                : "\"" + htmlEscape(node_ptr->name()) + attrs + "<br/><sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\"";
 
         if (verbose) {
           givenName += "<br/><span style='color:white; background-color: purple; float: right'>" + node_ptr->getOperator()->backend() + "</span>";
@@ -1092,7 +1093,25 @@ bool Aidge::GraphView::add(std::pair<NodePtr, std::set<NodePtr>> nodes, bool inc
 bool Aidge::GraphView::add(std::shared_ptr<GraphView> graph, bool includeLearnableParam) {
     // set the rootNode to the other graphView rootNode if no rootNode yet
     mRootNode = mRootNode ? mRootNode : graph->rootNode();
-    return add(graph->getNodes(), includeLearnableParam);
+    auto orderedInputs = mInputNodes;
+    auto orderedOutputs = mOutputNodes;
+    const auto res = add(graph->getNodes(), includeLearnableParam);
+
+    // Preserve the relative order of inputs/outputs: the ordered inputs/outputs of the
+    // added graph are appended after the existing ones of the current graph.
+    orderedInputs.insert(orderedInputs.end(), graph->getOrderedInputs().begin(), graph->getOrderedInputs().end());
+    orderedInputs.erase(std::remove_if(orderedInputs.begin(), orderedInputs.end(), [this](const auto& x) {
+        return std::find(mInputNodes.begin(), mInputNodes.end(), x) == mInputNodes.end();
+    }), orderedInputs.end());
+    setOrderedInputs(orderedInputs);
+
+    orderedOutputs.insert(orderedOutputs.end(), graph->getOrderedOutputs().begin(), graph->getOrderedOutputs().end());
+    orderedOutputs.erase(std::remove_if(orderedOutputs.begin(), orderedOutputs.end(), [this](const auto& x) {
+        return std::find(mOutputNodes.begin(), mOutputNodes.end(), x) == mOutputNodes.end();
+    }), orderedOutputs.end());
+    setOrderedOutputs(orderedOutputs);
+
+    return res;
 }
 
 void Aidge::GraphView::addChild(std::shared_ptr<Node> toOtherNode,
diff --git a/src/operator/ArgMax.cpp b/src/operator/ArgMax.cpp
index 8833452a88b1ddf79f85a99fa604e1acf9293c13..5abb4e9693e01d37e174b8c1f10bd45bb8f7d27d 100644
--- a/src/operator/ArgMax.cpp
+++ b/src/operator/ArgMax.cpp
@@ -34,7 +34,7 @@ Aidge::ArgMax_Op::ArgMax_Op(std::int32_t axis, bool keep_dims, bool select_last_
 
 Aidge::ArgMax_Op::ArgMax_Op(const Aidge::ArgMax_Op& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl){
         SET_IMPL_MACRO(ArgMax_Op, *this, op.backend());
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 966063cd05eb728fc6a976e9ae7949da36a9da5c..25eb5933002569fdf0ec118ee09e499768264996 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -43,7 +43,7 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const std::array<DimSize_t, DIM> &kerne
 template <Aidge::DimIdx_t DIM>
 Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend());
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index db5ab4d4066e2ca6ad133dc30e90e5b90bd545af..3d58d6ce397ac8c2601b1fce543ca127abf3aaca 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -45,7 +45,7 @@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(float epsilon, float momentum, bool train
 template <Aidge::DimIdx_t DIM>
 Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend());
diff --git a/src/operator/BitShift.cpp b/src/operator/BitShift.cpp
index 0c6a1de914182fd55981c3fd45fd37d597e7f2ae..c2fa39d0bd47b9f8d89e9dc5a18ffeb8a132fc18 100644
--- a/src/operator/BitShift.cpp
+++ b/src/operator/BitShift.cpp
@@ -34,7 +34,7 @@ BitShift_Op::BitShift_Op(BitShiftDirection direction, bool rounding)
 
 BitShift_Op::BitShift_Op(const BitShift_Op& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(BitShift_Op, *this, op.backend());
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index 587310c068fadb26dff1495c02ec46a90b600b72..ac9d55247762c8a4c6181ce8266da82357e81c75 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -39,7 +39,7 @@ Cast_Op::Cast_Op(const DataType targetType)
 
 Cast_Op::Cast_Op(const Cast_Op& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Cast_Op, *this, op.backend());
diff --git a/src/operator/Clip.cpp b/src/operator/Clip.cpp
index c1b165bb9ebe737a9730da482cc2c5dbc67a3e8d..d5978e79f6f1d7246b997d2a1e91e01e8495c2fd 100644
--- a/src/operator/Clip.cpp
+++ b/src/operator/Clip.cpp
@@ -27,7 +27,7 @@ Clip_Op::Clip_Op(float min, float max)
 
 Clip_Op::Clip_Op(const Clip_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(Clip_Op, *this, op.backend());
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index 908e1487f456498eb1a9e2ab3ad00791153fbd62..8cc4f5f3b1e258830814e4ebca9c164122b6e79e 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -35,7 +35,7 @@ Aidge::Concat_Op::Concat_Op(const Aidge::IOIndex_t nbIn, const std::int32_t axis
 
 Aidge::Concat_Op::Concat_Op(const Aidge::Concat_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Concat_Op, *this, op.backend());
diff --git a/src/operator/ConstantOfShape.cpp b/src/operator/ConstantOfShape.cpp
index 524eb44beb01cb3b47c46e971533ccd75f8814d6..2e9adb8181511d2851a84f6965051bbe3e402f0e 100644
--- a/src/operator/ConstantOfShape.cpp
+++ b/src/operator/ConstantOfShape.cpp
@@ -34,7 +34,7 @@ ConstantOfShape_Op::ConstantOfShape_Op(const Tensor &value)
         attr<ConstantOfShapeAttr::Value>(value))) {}
 
 ConstantOfShape_Op::ConstantOfShape_Op(const ConstantOfShape_Op &op)
-    : OperatorTensor(op), mAttributes(op.mAttributes)
+    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(ConstantOfShape_Op, *this, op.backend());
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 994388fa8f78d4dfb4362361dabb2f2d9344e34b..5829c9439567b28445ea7a7af0aaba1828b9b4a9 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -30,7 +30,7 @@ const std::string Aidge::Conv_Op<DIM>::Type = "Conv" + std::to_string(DIM) + "D"
 template <Aidge::DimIdx_t DIM>
 Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index d2a1c9e3d08d8e2c0400d436c6123aeb5f7ce66b..0a8480acec5cade021e5e3405212b2c446914a31 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -30,7 +30,7 @@ const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise" + std::to
 template <Aidge::DimIdx_t DIM>
 Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
diff --git a/src/operator/ConvTranspose.cpp b/src/operator/ConvTranspose.cpp
index 8571518d7976a516283a13c651576a095e5e017a..a048f4049e2a39e98ac4afcdb0a04ffeacf9070a 100644
--- a/src/operator/ConvTranspose.cpp
+++ b/src/operator/ConvTranspose.cpp
@@ -32,7 +32,7 @@ const std::string ConvTranspose_Op<DIM>::Type =
 
 template <DimIdx_t DIM>
 ConvTranspose_Op<DIM>::ConvTranspose_Op(const ConvTranspose_Op<DIM> &op)
-    : OperatorTensor(op), mAttributes(op.mAttributes) {
+    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
     if (op.mImpl) {
         SET_IMPL_MACRO(ConvTranspose_Op<DIM>, *this, op.backend());
     } else {
diff --git a/src/operator/CryptoHash.cpp b/src/operator/CryptoHash.cpp
index 064b480b1b249e6f73439489a17fa13fc27b24ad..530e94766e7d790e64dae70239bdf3ef6eed3a53 100644
--- a/src/operator/CryptoHash.cpp
+++ b/src/operator/CryptoHash.cpp
@@ -30,7 +30,7 @@ Aidge::CryptoHash_Op::CryptoHash_Op()
 
 Aidge::CryptoHash_Op::CryptoHash_Op(const Aidge::CryptoHash_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl){
         SET_IMPL_MACRO(CryptoHash_Op, *this, op.backend());
diff --git a/src/operator/DepthToSpace.cpp b/src/operator/DepthToSpace.cpp
index e709849e57777130726b791d9281ceadf8fc5d86..9ab7034e7bdc80396742ce5d436b7a768b3686f0 100644
--- a/src/operator/DepthToSpace.cpp
+++ b/src/operator/DepthToSpace.cpp
@@ -35,7 +35,7 @@ Aidge::DepthToSpace_Op::DepthToSpace_Op(const std::uint32_t blockSize, const Aid
 
 Aidge::DepthToSpace_Op::DepthToSpace_Op(const Aidge::DepthToSpace_Op& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(DepthToSpace_Op, *this, op.backend());
diff --git a/src/operator/Dropout.cpp b/src/operator/Dropout.cpp
index 66904933d0eb80a6175d8ca4d3cc47205ffbf391..0063a446ee3741691f5c963935f8e9a3fd116d0f 100644
--- a/src/operator/Dropout.cpp
+++ b/src/operator/Dropout.cpp
@@ -33,7 +33,7 @@ Dropout_Op::Dropout_Op(float probability)
 
 Dropout_Op::Dropout_Op(const Dropout_Op& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     // Copy constructor implementation
     if (op.mImpl) {
diff --git a/src/operator/Flatten.cpp b/src/operator/Flatten.cpp
index 4714feb11659a879a81984c638ad6872545c23b0..4d4b6385bfc766283045042e82f66e3f450654e4 100644
--- a/src/operator/Flatten.cpp
+++ b/src/operator/Flatten.cpp
@@ -36,7 +36,7 @@ Aidge::Flatten_Op::Flatten_Op(const std::int64_t axis)
 
 Aidge::Flatten_Op::Flatten_Op(const Aidge::Flatten_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Flatten_Op, *this, op.backend());
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
index 1e1db2f94948dfd1dd4c6219419b7989eeac8b3a..50a474cd37d5e28edd5512d0725337ea6923e229 100644
--- a/src/operator/Fold.cpp
+++ b/src/operator/Fold.cpp
@@ -29,7 +29,7 @@ const std::string Aidge::Fold_Op<DIM>::Type = "Fold" + std::to_string(DIM) + "D"
 template <Aidge::DimIdx_t DIM>
 Aidge::Fold_Op<DIM>::Fold_Op(const Aidge::Fold_Op<DIM> &op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Fold_Op<DIM>, *this, op.backend());
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index a4cb4aab0f10cbb3b197e743a5b40208b4a0da94..410403adcf1794411a1f4a7de43b67601abd74fa 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -37,7 +37,7 @@ Aidge::Gather_Op::Gather_Op(std::int8_t axis,
 }
 
 Aidge::Gather_Op::Gather_Op(const Aidge::Gather_Op& op)
-    : OperatorTensor(op), mAttributes(op.mAttributes)
+    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Gather_Op, *this, op.backend());
diff --git a/src/operator/GridSample.cpp b/src/operator/GridSample.cpp
index d26679f8337390879c8f4c4d10deb883fb40e6da..e1b8d1442848a4a2a0f1807ad45280a7db70b068 100644
--- a/src/operator/GridSample.cpp
+++ b/src/operator/GridSample.cpp
@@ -41,7 +41,7 @@ Aidge::GridSample_Op::GridSample_Op(
 
 Aidge::GridSample_Op::GridSample_Op(const Aidge::GridSample_Op& other)
     : OperatorTensor(other),
-      mAttributes(other.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*other.mAttributes))
 {
     if (other.mImpl) {
         SET_IMPL_MACRO(GridSample_Op, *this, other.backend());
diff --git a/src/operator/Heaviside.cpp b/src/operator/Heaviside.cpp
index 6555a530bd02edf6f1823469297d289fb4b57b87..3c6fe5495653e0db5876e957b4be0bd378971819 100644
--- a/src/operator/Heaviside.cpp
+++ b/src/operator/Heaviside.cpp
@@ -33,7 +33,7 @@ Heaviside_Op::Heaviside_Op(float value)
           std::make_shared<Attributes_>(attr<Attr::Value>(value))) {}
 
 Heaviside_Op::Heaviside_Op(const Heaviside_Op &op)
-    : OperatorTensor(op), mAttributes(op.mAttributes) {
+    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
     if (op.mImpl) {
         SET_IMPL_MACRO(Heaviside_Op, *this, op.backend());
     } else {
diff --git a/src/operator/LRN.cpp b/src/operator/LRN.cpp
index 5b7d663e78cf92047e3ed47212f2a27d42a8de49..36dde6712c9009d54162753e1076321f9a16688b 100644
--- a/src/operator/LRN.cpp
+++ b/src/operator/LRN.cpp
@@ -31,7 +31,7 @@ Aidge::LRN_Op::LRN_Op(std::int32_t size)
 
 Aidge::LRN_Op::LRN_Op(const Aidge::LRN_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl){
         SET_IMPL_MACRO(LRN_Op, *this, op.backend());
diff --git a/src/operator/LeakyReLU.cpp b/src/operator/LeakyReLU.cpp
index dea73f3101887c5213a02b029d344a34f74ba4af..b5e1a9d6acaa6208617857501b770c7c1dcf9f55 100644
--- a/src/operator/LeakyReLU.cpp
+++ b/src/operator/LeakyReLU.cpp
@@ -20,7 +20,7 @@ const std::string Aidge::LeakyReLU_Op::Type = "LeakyReLU";
 
 Aidge::LeakyReLU_Op::LeakyReLU_Op(const Aidge::LeakyReLU_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl){
         SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp
index b0bd167dd28a10b22516259b5087a834bd6afeda..2ee3613407d0d9c334f0947adec36734d328a8f5 100644
--- a/src/operator/MaxPooling.cpp
+++ b/src/operator/MaxPooling.cpp
@@ -38,7 +38,7 @@ Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM>
 template <Aidge::DimIdx_t DIM>
 Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const Aidge::MaxPooling_Op<DIM>& op)
     : OperatorTensor(op),
-    mAttributes(op.mAttributes)
+    mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index 4f1195045001c34db7b1a27c8a9b6cecb484e34f..c3ccc12ac090f0e813f521abac693233115370b3 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -37,7 +37,7 @@ Aidge::Memorize_Op::Memorize_Op(const std::uint32_t endStep)
 
 Aidge::Memorize_Op::Memorize_Op(const Aidge::Memorize_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
diff --git a/src/operator/Mod.cpp b/src/operator/Mod.cpp
index 038a3c28451f6cc4a4a1db73314be1bf07f9e67d..673c00225da906dbb1b1bda5b57e52482ec86d31 100644
--- a/src/operator/Mod.cpp
+++ b/src/operator/Mod.cpp
@@ -30,7 +30,7 @@ Aidge::Mod_Op::Mod_Op()
 
 Aidge::Mod_Op::Mod_Op(const Aidge::Mod_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl){
         SET_IMPL_MACRO(Mod_Op, *this, op.backend());
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index e3a41bc7aabb44eea3668ef301cc008ed02e40bb..01b45e6d3dd2f62d64f6b048a181bbf6ed4b65f9 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -34,7 +34,7 @@ Aidge::Pop_Op::Pop_Op()
 
 Aidge::Pop_Op::Pop_Op(const Aidge::Pop_Op& op)
     : OperatorTensor(op),
-    mAttributes(op.mAttributes)
+    mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Pop_Op, *this, op.backend());
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 0beaf91b3a31ee9347a91ae4b77287ac0abcdc20..505192661eb4c519feaec4b79d8a54dd523f07c3 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -59,7 +59,7 @@ Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, boo
  */
 Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     *mOutputs[0] = *(op.getOutput(0));
     if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index dfaa75a4883ce2c9dcc77f89dc9f970c3f1ed2cd..ec6e68fbee2a88233d604f53aa633c5f789516d3 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -37,7 +37,7 @@ Aidge::ReduceMean_Op::ReduceMean_Op(const std::vector<std::int32_t>& axes, bool
 
 Aidge::ReduceMean_Op::ReduceMean_Op(const Aidge::ReduceMean_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl){
         SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 3df66f293186dc88057dced68640420b265bb3d2..50768a0980ef092568041b65e28679c6aa18ab35 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -37,7 +37,7 @@ Aidge::Reshape_Op::Reshape_Op(const std::vector<std::int64_t>& shape, bool allow
 
 Aidge::Reshape_Op::Reshape_Op(const Aidge::Reshape_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp
index 268a14cf9759a6e03302680814778da4804dcc19..218d25cbd19db224189fce0aa1ef39ec306648f8 100644
--- a/src/operator/Scaling.cpp
+++ b/src/operator/Scaling.cpp
@@ -36,7 +36,7 @@ Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOu
 
 Aidge::Scaling_Op::Scaling_Op(const Aidge::Scaling_Op& op)
     : OperatorTensor(op),
-    mAttributes(op.mAttributes)
+    mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     Log::warn("Caution: The [Scaling] operator is now deprecated and should no longer be used. \nIt has been replaced by the MetaOperator [Quantizer] (located directly in aidge_quantization).");
     if (op.mImpl){
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index cc8380099d003f499180719687fc6c8d5d4d643d..0927d3a6b8ea4f287677bfdd521c60503521b9a0 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -36,7 +36,7 @@ Aidge::Shape_Op::Shape_Op(const std::int64_t start, const std::int64_t end)
 
 Aidge::Shape_Op::Shape_Op(const Aidge::Shape_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Shape_Op, *this, op.backend());
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index de08ae6e9a4c6955de262c98d817a154a2d6b045..60ec176c7a1d412801d8e0f6da9503b8f9f8ea8d 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -49,7 +49,7 @@ Aidge::Slice_Op::Slice_Op(const std::vector<std::int64_t>& starts,
 }
 
 Aidge::Slice_Op::Slice_Op(const Aidge::Slice_Op& op)
-    : OperatorTensor(op), mAttributes(op.mAttributes)
+    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Slice_Op, *this, op.backend());
diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp
index ad894c5e56a674a452d0388f88a7e4ad268dd216..df8a9e0217ad4c4fa514b89258a6aa4c02ba608b 100644
--- a/src/operator/Softmax.cpp
+++ b/src/operator/Softmax.cpp
@@ -28,7 +28,7 @@ Aidge::Softmax_Op::Softmax_Op(std::int32_t axis)
 
 Aidge::Softmax_Op::Softmax_Op(const Aidge::Softmax_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl){
         SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index f93a36606b61a067ba058f4780db741f7f281fb4..4bdf01b694b6d5e764c60a0b94f63a877164139b 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -37,7 +37,7 @@ Aidge::Split_Op::Split_Op(std::int8_t axis,
 
 Aidge::Split_Op::Split_Op(const Aidge::Split_Op &op)
     : OperatorTensor(op),
-    mAttributes(op.mAttributes)
+    mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Split_Op, *this, op.backend());
diff --git a/src/operator/Squeeze.cpp b/src/operator/Squeeze.cpp
index ea34528787f8a8f0e9f8032d97302b1df21532f7..53b8e76ed0b35ad979811929ea5e9dceb68f0b45 100644
--- a/src/operator/Squeeze.cpp
+++ b/src/operator/Squeeze.cpp
@@ -41,7 +41,7 @@ Squeeze_Op::Squeeze_Op(const std::vector<std::int8_t> &axes)
 
 Squeeze_Op::Squeeze_Op(const Squeeze_Op &op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Squeeze_Op, *this, op.backend());
diff --git a/src/operator/Stack.cpp b/src/operator/Stack.cpp
index b884baaa4ec94e57c17eb7c3c48c0919f8a587d3..9e66fac648052ad79be64e205a567f134f476fc0 100644
--- a/src/operator/Stack.cpp
+++ b/src/operator/Stack.cpp
@@ -35,7 +35,7 @@ StackOp::StackOp(std::uint32_t maxElements)
 }
 
 StackOp::StackOp(const Aidge::StackOp &op)
-    : OperatorTensor(op), mAttributes(op.mAttributes) {
+    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(StackOp, *this, op.backend());
     } else {
diff --git a/src/operator/TopK.cpp b/src/operator/TopK.cpp
index 660865100a817b32c42328b34032541479b3aefc..4d574784fb9c79e7239a3c01b90dde873804aeb1 100644
--- a/src/operator/TopK.cpp
+++ b/src/operator/TopK.cpp
@@ -45,7 +45,7 @@ TopK_Op::TopK_Op(
 
 TopK_Op::TopK_Op(const TopK_Op& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(TopK_Op, *this, op.backend());
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index f9e46947654a1117891dbe1e5cd679ae0e436dec..55214e69ca55e4231a9077e6603b0fcb4e414c56 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -42,7 +42,7 @@ Aidge::Transpose_Op::Transpose_Op(const std::vector<Aidge::DimSize_t> &outputDim
 
 Aidge::Transpose_Op::Transpose_Op(const Aidge::Transpose_Op& op)
     : OperatorTensor(op),
-    mAttributes(op.mAttributes)
+    mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
diff --git a/src/operator/Unfold.cpp b/src/operator/Unfold.cpp
index 888109240ce35841da82aac2600151d3632ffd2b..5fac669b8a7a45b89ee78628641b84df8c642be7 100644
--- a/src/operator/Unfold.cpp
+++ b/src/operator/Unfold.cpp
@@ -44,7 +44,7 @@ Aidge::Unfold_Op<DIM>::Unfold_Op(const std::array<Aidge::DimSize_t, DIM> &kernel
 template <Aidge::DimIdx_t DIM>
 Aidge::Unfold_Op<DIM>::Unfold_Op(const Aidge::Unfold_Op<DIM> &op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Unfold_Op<DIM>, *this, op.backend());
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
index 679b420ec3d794f7efbbe730dd0d75fde4553dea..b73b416c915b46babf7bfdffd99770c7103fd84c 100644
--- a/src/operator/Unsqueeze.cpp
+++ b/src/operator/Unsqueeze.cpp
@@ -37,7 +37,7 @@ Unsqueeze_Op::Unsqueeze_Op(const std::vector<int8_t> &axes)
 
 Unsqueeze_Op::Unsqueeze_Op(const Unsqueeze_Op &op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Unsqueeze_Op, *this, op.backend());
diff --git a/src/utils/Log.cpp b/src/utils/Log.cpp
index f4bc32e8b70637738b14c684603b71a036e74fb6..cb1915dc73df692136ab3d1dac6b14b89e095a76 100644
--- a/src/utils/Log.cpp
+++ b/src/utils/Log.cpp
@@ -249,4 +249,20 @@ void Log::setFileName(const std::string& fileName) {
     }
 }
 
+std::string htmlEscape(const std::string& data) {
+    std::string escaped;
+    escaped.reserve(data.size());
+    for (const char c : data) {
+        switch (c) {
+            case '&':  escaped.append("&amp;");  break;
+            case '\"': escaped.append("&quot;"); break;
+            case '\'': escaped.append("&apos;"); break;
+            case '<':  escaped.append("&lt;");   break;
+            case '>':  escaped.append("&gt;");   break;
+            default:   escaped.push_back(c);     break;
+        }
+    }
+    return escaped;
+}
+
 } // namespace Aidge