From 5f0a18f57e372a30df896b8a956b3a98f150165d Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 10 Apr 2025 14:50:44 +0200
Subject: [PATCH 1/5] Added htmlEscape for GraphView::save() Mermaid/HTML labels

---
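Note: GraphView::save() interpolates node names and attribute strings into
Mermaid/HTML labels, so characters such as '<', '>', '&' or quotes in a node
name could break the generated markup. htmlEscape() replaces the five
XML-reserved characters before interpolation. A minimal standalone sketch of
the intended behaviour (the main() wrapper is illustrative only):

    #include <iostream>
    #include <string>

    // Same escaping rules as the htmlEscape() added by this patch.
    std::string htmlEscape(const std::string& data) {
        std::string buffer;
        buffer.reserve(data.size());
        for (char c : data) {
            switch (c) {
                case '&':  buffer.append("&amp;");  break;
                case '\"': buffer.append("&quot;"); break;
                case '\'': buffer.append("&apos;"); break;
                case '<':  buffer.append("&lt;");   break;
                case '>':  buffer.append("&gt;");   break;
                default:   buffer.push_back(c);     break;
            }
        }
        return buffer;
    }

    int main() {
        // A node name containing markup-reserved characters:
        std::cout << htmlEscape("Conv<3x3> & \"bias\"") << '\n';
        // Prints: Conv&lt;3x3&gt; &amp; &quot;bias&quot;
    }
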
 include/aidge/utils/Log.hpp |  1 +
 src/graph/GraphView.cpp     |  5 +++--
 src/utils/Log.cpp           | 16 ++++++++++++++++
 3 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index d2d06fa5f..452468278 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -258,6 +258,7 @@ private:
     static int mFloatingPointPrecision;
 };
 
+std::string htmlEscape(const std::string& data);
 } // namespace Aidge
 
 namespace {
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 9d6557054..551ab907d 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -106,14 +106,15 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
         std::string attrs;
         // Ignore name attribute (if size == 1)
         if (node_ptr->attributes()->getAttrs().size() > 1) {
+          attrs = htmlEscape(fmt::format("{}", *node_ptr->attributes()));
           attrs = fmt::format("&nbsp;<sup><span title=\"{}\" style=\"cursor: pointer; font-weight: bold; color: blue\">[{}]</span></sup>",
-            *node_ptr->attributes(), node_ptr->attributes()->getAttrs().size());
+            attrs, node_ptr->attributes()->getAttrs().size());
         }
 
         std::string givenName =
             (node_ptr->name().empty())
                 ? "\"<em>" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + "</em>" + attrs + "\""
-                : "\"" + node_ptr->name() + attrs + "<br/><sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\"";
+                : "\"" + htmlEscape(node_ptr->name()) + attrs + "<br/><sub><em>(" + node_ptr->type() + "#" + namePtrTable.at(node_ptr) + ")</em></sub>\"";
 
         if (verbose) {
           givenName += "<br/><span style='color:white; background-color: purple; float: right'>" + node_ptr->getOperator()->backend() + "</span>";
diff --git a/src/utils/Log.cpp b/src/utils/Log.cpp
index f4bc32e8b..cb1915dc7 100644
--- a/src/utils/Log.cpp
+++ b/src/utils/Log.cpp
@@ -249,4 +249,20 @@ void Log::setFileName(const std::string& fileName) {
     }
 }
 
+std::string htmlEscape(const std::string& data) {
+    std::string buffer;
+    buffer.reserve(data.size());
+    for(size_t pos = 0; pos != data.size(); ++pos) {
+        switch(data[pos]) {
+            case '&':  buffer.append("&amp;");       break;
+            case '\"': buffer.append("&quot;");      break;
+            case '\'': buffer.append("&apos;");      break;
+            case '<':  buffer.append("&lt;");        break;
+            case '>':  buffer.append("&gt;");        break;
+            default:   buffer.push_back(data[pos]);  break;
+        }
+    }
+    return buffer;
+}
+
 } // namespace Aidge
-- 
GitLab


From 2480e05d16d11a391ae35f53e366e2c1c8367cd3 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 10 Apr 2025 16:00:20 +0200
Subject: [PATCH 2/5] Added const version of refCastFrom

---
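Note: refCastFrom() and ref() previously had only non-const overloads, so they
could not be called on a tensor reached through a const reference. The
overloads added below mirror the existing bodies. A hedged call-site sketch
(useAsFloat32 is an illustrative helper, not part of the API, and a CPU
backend plugin is assumed to be loaded):

    #include <memory>
    #include "aidge/data/Tensor.hpp"

    // Read-only access to a tensor as float32 on the "cpu" backend,
    // without const_cast at the call site.
    void useAsFloat32(const Aidge::Tensor& input) {
        std::shared_ptr<Aidge::Tensor> fallback;
        const Aidge::Tensor& f32 =
            input.refCastFrom(fallback, Aidge::DataType::Float32, "cpu");
        // ... read-only use of f32; fallback owns the copy if one was needed ...
    }
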
 include/aidge/data/Tensor.hpp | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 9b2c6d76e..927911056 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -885,6 +885,10 @@ public:
         // First refFrom, to ensure that fallback, if required, is also on desired device
         return refFrom(fallback, backend, device).refCast(fallback, dt);
     }
+    const Tensor& refCastFrom(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device = 0) const {
+        // First refFrom, to ensure that fallback, if required, is also on desired device
+        return refFrom(fallback, backend, device).refCast(fallback, dt);
+    }
 
     /**
      * Return a reference to a Tensor with same characteristics
@@ -904,6 +908,10 @@ public:
         const auto& device = targetReqs.getImpl()->device();
         return refCastFrom(fallback, targetReqs.dataType(), device.first, device.second);
     }
+    const Tensor& refCastFrom(std::shared_ptr<Tensor>& fallback, const Tensor& targetReqs) const {
+        const auto& device = targetReqs.getImpl()->device();
+        return refCastFrom(fallback, targetReqs.dataType(), device.first, device.second);
+    }
 
     /**
      * @brief Return a reference to a Tensor on desired data type and backend/device:
@@ -941,6 +949,10 @@ public:
         const auto& device = targetReqs.getImpl()->device();
         return ref(fallback, targetReqs.dataType(), device.first, device.second);
     }
+    const Tensor& ref(std::shared_ptr<Tensor>& fallback, const Tensor& targetReqs) const {
+        const auto& device = targetReqs.getImpl()->device();
+        return ref(fallback, targetReqs.dataType(), device.first, device.second);
+    }
 
 
     /**
-- 
GitLab


From 050faae975320e9da7f5bfaacb5ffe78fe29a1ae Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 10 Apr 2025 16:00:43 +0200
Subject: [PATCH 3/5] Make inputs/outputs order deterministic when adding a
 GraphView to a GraphView

---
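Note: add(std::set<NodePtr>) iterates over a set, so the input/output order of
the merged graph previously depended on pointer ordering. The change below
snapshots the current ordered inputs/outputs, performs the merge, appends the
added graph's ordered lists, and reinstates an explicit order through
setOrderedInputs()/setOrderedOutputs(). A sketch of the intended invariant
(graph contents are illustrative):

    #include <memory>
    #include "aidge/graph/GraphView.hpp"

    auto g1 = std::make_shared<Aidge::GraphView>("g1");
    auto g2 = std::make_shared<Aidge::GraphView>("g2");
    // ... populate g1 and g2 ...
    g1->add(g2);
    // g1->getOrderedInputs() now lists g1's previous ordered inputs first,
    // then g2's ordered inputs, independently of the iteration order of the
    // underlying node sets.
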
 src/graph/GraphView.cpp | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 551ab907d..5c4cf9a59 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -1093,7 +1093,14 @@ bool Aidge::GraphView::add(std::pair<NodePtr, std::set<NodePtr>> nodes, bool inc
 bool Aidge::GraphView::add(std::shared_ptr<GraphView> graph, bool includeLearnableParam) {
     // set the rootNode to the other graphView rootNode if no rootNode yet
     mRootNode = mRootNode ? mRootNode : graph->rootNode();
-    return add(graph->getNodes(), includeLearnableParam);
+    auto orderedInputs = mInputNodes;
+    auto orderedOutputs = mOutputNodes;
+    const auto res = add(graph->getNodes(), includeLearnableParam);
+    orderedInputs.insert(orderedInputs.end(), graph->getOrderedInputs().begin(), graph->getOrderedInputs().end());
+    orderedOutputs.insert(orderedOutputs.end(), graph->getOrderedOutputs().begin(), graph->getOrderedOutputs().end());
+    setOrderedInputs(orderedInputs);
+    setOrderedOutputs(orderedOutputs);
+    return res;
 }
 
 void Aidge::GraphView::addChild(std::shared_ptr<Node> toOtherNode,
-- 
GitLab


From 7d665ab6be4b2796beccf8147d1e8805fb2ef353 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 10 Apr 2025 17:40:43 +0200
Subject: [PATCH 4/5] Correct inputs/outputs order

---
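Note: after the merge, some of the concatenated entries may no longer be graph
inputs/outputs (for instance when an output of the current graph now feeds an
input of the added graph). Patch 3 appended them unconditionally; this
follow-up filters the concatenated lists against the recomputed
mInputNodes/mOutputNodes while keeping relative order. A small sketch of the
erase/remove_if idiom used (the int values are illustrative):

    #include <algorithm>
    #include <vector>

    std::vector<int> ordered    = {1, 2, 3, 4};
    std::vector<int> stillValid = {1, 3};
    // std::remove_if preserves the relative order of the kept elements,
    // which is why it suits the ordered input/output lists.
    ordered.erase(std::remove_if(ordered.begin(), ordered.end(),
        [&](int x) {
            return std::find(stillValid.begin(), stillValid.end(), x)
                   == stillValid.end();
        }), ordered.end());
    // ordered == {1, 3}
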
 src/graph/GraphView.cpp | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 5c4cf9a59..aecec559d 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -1096,10 +1096,21 @@ bool Aidge::GraphView::add(std::shared_ptr<GraphView> graph, bool includeLearnab
     auto orderedInputs = mInputNodes;
     auto orderedOutputs = mOutputNodes;
     const auto res = add(graph->getNodes(), includeLearnableParam);
+
+    // Keep the inputs/outputs relative order: the added graph's ordered inputs/outputs are
+    // appended after the existing ones, then entries that are no longer graph inputs/outputs are dropped.
     orderedInputs.insert(orderedInputs.end(), graph->getOrderedInputs().begin(), graph->getOrderedInputs().end());
-    orderedOutputs.insert(orderedOutputs.end(), graph->getOrderedOutputs().begin(), graph->getOrderedOutputs().end());
+    orderedInputs.erase(std::remove_if(orderedInputs.begin(), orderedInputs.end(), [this](const auto& x) {
+        return std::find(mInputNodes.begin(), mInputNodes.end(), x) == mInputNodes.end();
+    }), orderedInputs.end());
     setOrderedInputs(orderedInputs);
+
+    orderedOutputs.insert(orderedOutputs.end(), graph->getOrderedOutputs().begin(), graph->getOrderedOutputs().end());
+    orderedOutputs.erase(std::remove_if(orderedOutputs.begin(), orderedOutputs.end(), [this](const auto& x) {
+        return std::find(mOutputNodes.begin(), mOutputNodes.end(), x) == mOutputNodes.end();
+    }), orderedOutputs.end());
     setOrderedOutputs(orderedOutputs);
+
     return res;
 }
 
-- 
GitLab


From 7957949fef40173dbaaa99f6f7278e002f57c41a Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Thu, 10 Apr 2025 22:00:22 +0200
Subject: [PATCH 5/5] Fixed attributes not being properly cloned by operator copy-constructors

---
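Note: the operator copy-constructors copied the std::shared_ptr to the
attributes, so a cloned operator aliased the original's attribute storage and
mutating an attribute on the clone also mutated the source. Each constructor
now deep-copies via std::make_shared<Attributes_>(*op.mAttributes). A minimal
sketch of the aliasing bug being fixed (Attrs/Op are illustrative stand-ins,
not the Aidge classes):

    #include <cassert>
    #include <memory>

    struct Attrs { int axis; };

    struct Op {
        std::shared_ptr<Attrs> mAttributes;
        explicit Op(int axis) : mAttributes(std::make_shared<Attrs>(Attrs{axis})) {}
        // Before: mAttributes(op.mAttributes)  -> clone shares the same Attrs
        Op(const Op& op) : mAttributes(std::make_shared<Attrs>(*op.mAttributes)) {}
    };

    int main() {
        Op a(0);
        Op b(a);                 // clone
        b.mAttributes->axis = 1; // must not change a
        assert(a.mAttributes->axis == 0);
    }
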
 include/aidge/operator/Pad.hpp       | 2 +-
 include/aidge/operator/ReduceSum.hpp | 2 +-
 include/aidge/operator/Resize.hpp    | 2 +-
 src/operator/ArgMax.cpp              | 2 +-
 src/operator/AvgPooling.cpp          | 2 +-
 src/operator/BatchNorm.cpp           | 2 +-
 src/operator/BitShift.cpp            | 2 +-
 src/operator/Cast.cpp                | 2 +-
 src/operator/Clip.cpp                | 2 +-
 src/operator/Concat.cpp              | 2 +-
 src/operator/ConstantOfShape.cpp     | 2 +-
 src/operator/Conv.cpp                | 2 +-
 src/operator/ConvDepthWise.cpp       | 2 +-
 src/operator/ConvTranspose.cpp       | 2 +-
 src/operator/CryptoHash.cpp          | 2 +-
 src/operator/DepthToSpace.cpp        | 2 +-
 src/operator/Dropout.cpp             | 2 +-
 src/operator/Flatten.cpp             | 2 +-
 src/operator/Fold.cpp                | 2 +-
 src/operator/Gather.cpp              | 2 +-
 src/operator/GridSample.cpp          | 2 +-
 src/operator/Heaviside.cpp           | 2 +-
 src/operator/LRN.cpp                 | 2 +-
 src/operator/LeakyReLU.cpp           | 2 +-
 src/operator/MaxPooling.cpp          | 2 +-
 src/operator/Memorize.cpp            | 2 +-
 src/operator/Mod.cpp                 | 2 +-
 src/operator/Pop.cpp                 | 2 +-
 src/operator/Producer.cpp            | 2 +-
 src/operator/ReduceMean.cpp          | 2 +-
 src/operator/Reshape.cpp             | 2 +-
 src/operator/Scaling.cpp             | 2 +-
 src/operator/Shape.cpp               | 2 +-
 src/operator/Slice.cpp               | 2 +-
 src/operator/Softmax.cpp             | 2 +-
 src/operator/Split.cpp               | 2 +-
 src/operator/Squeeze.cpp             | 2 +-
 src/operator/Stack.cpp               | 2 +-
 src/operator/TopK.cpp                | 2 +-
 src/operator/Transpose.cpp           | 2 +-
 src/operator/Unfold.cpp              | 2 +-
 src/operator/Unsqueeze.cpp           | 2 +-
 42 files changed, 42 insertions(+), 42 deletions(-)

diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 3b10d9c21..1aaa82a0e 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -170,7 +170,7 @@ public:
      */
     Pad_Op(const Pad_Op& op)
         : OperatorTensor(op),
-          mAttributes(op.mAttributes) {}
+          mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {}
 
     /**
      * @brief Clone the operator using its copy-constructor.
diff --git a/include/aidge/operator/ReduceSum.hpp b/include/aidge/operator/ReduceSum.hpp
index 73f59c25d..9900a79d3 100644
--- a/include/aidge/operator/ReduceSum.hpp
+++ b/include/aidge/operator/ReduceSum.hpp
@@ -112,7 +112,7 @@ public:
      */
     ReduceSum_Op(const ReduceSum_Op& op)
         : OperatorTensor(op),
-          mAttributes(op.mAttributes)
+          mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
     {
         if (op.mImpl){
             SET_IMPL_MACRO(ReduceSum_Op, *this, op.backend());
diff --git a/include/aidge/operator/Resize.hpp b/include/aidge/operator/Resize.hpp
index 32ddbe488..6af6f85a9 100644
--- a/include/aidge/operator/Resize.hpp
+++ b/include/aidge/operator/Resize.hpp
@@ -157,7 +157,7 @@ public:
      * @param op Operator to copy.
      */
     Resize_Op(const Resize_Op &op)
-        : OperatorTensor(op), mAttributes(op.mAttributes) {
+        : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Resize_Op, *this, op.backend());
         } else {
diff --git a/src/operator/ArgMax.cpp b/src/operator/ArgMax.cpp
index 8833452a8..5abb4e969 100644
--- a/src/operator/ArgMax.cpp
+++ b/src/operator/ArgMax.cpp
@@ -34,7 +34,7 @@ Aidge::ArgMax_Op::ArgMax_Op(std::int32_t axis, bool keep_dims, bool select_last_
 
 Aidge::ArgMax_Op::ArgMax_Op(const Aidge::ArgMax_Op& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl){
         SET_IMPL_MACRO(ArgMax_Op, *this, op.backend());
diff --git a/src/operator/AvgPooling.cpp b/src/operator/AvgPooling.cpp
index 966063cd0..25eb59330 100644
--- a/src/operator/AvgPooling.cpp
+++ b/src/operator/AvgPooling.cpp
@@ -43,7 +43,7 @@ Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const std::array<DimSize_t, DIM> &kerne
 template <Aidge::DimIdx_t DIM>
 Aidge::AvgPooling_Op<DIM>::AvgPooling_Op(const AvgPooling_Op<DIM>& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(AvgPooling_Op<DIM>, *this, op.backend());
diff --git a/src/operator/BatchNorm.cpp b/src/operator/BatchNorm.cpp
index db5ab4d40..3d58d6ce3 100644
--- a/src/operator/BatchNorm.cpp
+++ b/src/operator/BatchNorm.cpp
@@ -45,7 +45,7 @@ Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(float epsilon, float momentum, bool train
 template <Aidge::DimIdx_t DIM>
 Aidge::BatchNorm_Op<DIM>::BatchNorm_Op(const BatchNorm_Op<DIM>& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(BatchNorm_Op<DIM>, *this, op.backend());
diff --git a/src/operator/BitShift.cpp b/src/operator/BitShift.cpp
index 0c6a1de91..c2fa39d0b 100644
--- a/src/operator/BitShift.cpp
+++ b/src/operator/BitShift.cpp
@@ -34,7 +34,7 @@ BitShift_Op::BitShift_Op(BitShiftDirection direction, bool rounding)
 
 BitShift_Op::BitShift_Op(const BitShift_Op& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(BitShift_Op, *this, op.backend());
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index 587310c06..ac9d55247 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -39,7 +39,7 @@ Cast_Op::Cast_Op(const DataType targetType)
 
 Cast_Op::Cast_Op(const Cast_Op& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Cast_Op, *this, op.backend());
diff --git a/src/operator/Clip.cpp b/src/operator/Clip.cpp
index c1b165bb9..d5978e79f 100644
--- a/src/operator/Clip.cpp
+++ b/src/operator/Clip.cpp
@@ -27,7 +27,7 @@ Clip_Op::Clip_Op(float min, float max)
 
 Clip_Op::Clip_Op(const Clip_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(Clip_Op, *this, op.backend());
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index 908e1487f..8cc4f5f3b 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -35,7 +35,7 @@ Aidge::Concat_Op::Concat_Op(const Aidge::IOIndex_t nbIn, const std::int32_t axis
 
 Aidge::Concat_Op::Concat_Op(const Aidge::Concat_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Concat_Op, *this, op.backend());
diff --git a/src/operator/ConstantOfShape.cpp b/src/operator/ConstantOfShape.cpp
index 524eb44be..2e9adb818 100644
--- a/src/operator/ConstantOfShape.cpp
+++ b/src/operator/ConstantOfShape.cpp
@@ -34,7 +34,7 @@ ConstantOfShape_Op::ConstantOfShape_Op(const Tensor &value)
         attr<ConstantOfShapeAttr::Value>(value))) {}
 
 ConstantOfShape_Op::ConstantOfShape_Op(const ConstantOfShape_Op &op)
-    : OperatorTensor(op), mAttributes(op.mAttributes)
+    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(ConstantOfShape_Op, *this, op.backend());
diff --git a/src/operator/Conv.cpp b/src/operator/Conv.cpp
index 994388fa8..5829c9439 100644
--- a/src/operator/Conv.cpp
+++ b/src/operator/Conv.cpp
@@ -30,7 +30,7 @@ const std::string Aidge::Conv_Op<DIM>::Type = "Conv" + std::to_string(DIM) + "D"
 template <Aidge::DimIdx_t DIM>
 Aidge::Conv_Op<DIM>::Conv_Op(const Aidge::Conv_Op<DIM>& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(Conv_Op<DIM>, *this, op.backend());
diff --git a/src/operator/ConvDepthWise.cpp b/src/operator/ConvDepthWise.cpp
index d2a1c9e3d..0a8480ace 100644
--- a/src/operator/ConvDepthWise.cpp
+++ b/src/operator/ConvDepthWise.cpp
@@ -30,7 +30,7 @@ const std::string Aidge::ConvDepthWise_Op<DIM>::Type = "ConvDepthWise" + std::to
 template <Aidge::DimIdx_t DIM>
 Aidge::ConvDepthWise_Op<DIM>::ConvDepthWise_Op(const Aidge::ConvDepthWise_Op<DIM>& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(ConvDepthWise_Op<DIM>, *this, op.backend());
diff --git a/src/operator/ConvTranspose.cpp b/src/operator/ConvTranspose.cpp
index 8571518d7..a048f4049 100644
--- a/src/operator/ConvTranspose.cpp
+++ b/src/operator/ConvTranspose.cpp
@@ -32,7 +32,7 @@ const std::string ConvTranspose_Op<DIM>::Type =
 
 template <DimIdx_t DIM>
 ConvTranspose_Op<DIM>::ConvTranspose_Op(const ConvTranspose_Op<DIM> &op)
-    : OperatorTensor(op), mAttributes(op.mAttributes) {
+    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
     if (op.mImpl) {
         SET_IMPL_MACRO(ConvTranspose_Op<DIM>, *this, op.backend());
     } else {
diff --git a/src/operator/CryptoHash.cpp b/src/operator/CryptoHash.cpp
index 064b480b1..530e94766 100644
--- a/src/operator/CryptoHash.cpp
+++ b/src/operator/CryptoHash.cpp
@@ -30,7 +30,7 @@ Aidge::CryptoHash_Op::CryptoHash_Op()
 
 Aidge::CryptoHash_Op::CryptoHash_Op(const Aidge::CryptoHash_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl){
         SET_IMPL_MACRO(CryptoHash_Op, *this, op.backend());
diff --git a/src/operator/DepthToSpace.cpp b/src/operator/DepthToSpace.cpp
index e709849e5..9ab7034e7 100644
--- a/src/operator/DepthToSpace.cpp
+++ b/src/operator/DepthToSpace.cpp
@@ -35,7 +35,7 @@ Aidge::DepthToSpace_Op::DepthToSpace_Op(const std::uint32_t blockSize, const Aid
 
 Aidge::DepthToSpace_Op::DepthToSpace_Op(const Aidge::DepthToSpace_Op& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(DepthToSpace_Op, *this, op.backend());
diff --git a/src/operator/Dropout.cpp b/src/operator/Dropout.cpp
index 66904933d..0063a446e 100644
--- a/src/operator/Dropout.cpp
+++ b/src/operator/Dropout.cpp
@@ -33,7 +33,7 @@ Dropout_Op::Dropout_Op(float probability)
 
 Dropout_Op::Dropout_Op(const Dropout_Op& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     // Copy constructor implementation
     if (op.mImpl) {
diff --git a/src/operator/Flatten.cpp b/src/operator/Flatten.cpp
index 4714feb11..4d4b6385b 100644
--- a/src/operator/Flatten.cpp
+++ b/src/operator/Flatten.cpp
@@ -36,7 +36,7 @@ Aidge::Flatten_Op::Flatten_Op(const std::int64_t axis)
 
 Aidge::Flatten_Op::Flatten_Op(const Aidge::Flatten_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Flatten_Op, *this, op.backend());
diff --git a/src/operator/Fold.cpp b/src/operator/Fold.cpp
index 1e1db2f94..50a474cd3 100644
--- a/src/operator/Fold.cpp
+++ b/src/operator/Fold.cpp
@@ -29,7 +29,7 @@ const std::string Aidge::Fold_Op<DIM>::Type = "Fold" + std::to_string(DIM) + "D"
 template <Aidge::DimIdx_t DIM>
 Aidge::Fold_Op<DIM>::Fold_Op(const Aidge::Fold_Op<DIM> &op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Fold_Op<DIM>, *this, op.backend());
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index a4cb4aab0..410403adc 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -37,7 +37,7 @@ Aidge::Gather_Op::Gather_Op(std::int8_t axis,
 }
 
 Aidge::Gather_Op::Gather_Op(const Aidge::Gather_Op& op)
-    : OperatorTensor(op), mAttributes(op.mAttributes)
+    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Gather_Op, *this, op.backend());
diff --git a/src/operator/GridSample.cpp b/src/operator/GridSample.cpp
index d26679f83..e1b8d1442 100644
--- a/src/operator/GridSample.cpp
+++ b/src/operator/GridSample.cpp
@@ -41,7 +41,7 @@ Aidge::GridSample_Op::GridSample_Op(
 
 Aidge::GridSample_Op::GridSample_Op(const Aidge::GridSample_Op& other)
     : OperatorTensor(other),
-      mAttributes(other.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*other.mAttributes))
 {
     if (other.mImpl) {
         SET_IMPL_MACRO(GridSample_Op, *this, other.backend());
diff --git a/src/operator/Heaviside.cpp b/src/operator/Heaviside.cpp
index 6555a530b..3c6fe5495 100644
--- a/src/operator/Heaviside.cpp
+++ b/src/operator/Heaviside.cpp
@@ -33,7 +33,7 @@ Heaviside_Op::Heaviside_Op(float value)
           std::make_shared<Attributes_>(attr<Attr::Value>(value))) {}
 
 Heaviside_Op::Heaviside_Op(const Heaviside_Op &op)
-    : OperatorTensor(op), mAttributes(op.mAttributes) {
+    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
     if (op.mImpl) {
         SET_IMPL_MACRO(Heaviside_Op, *this, op.backend());
     } else {
diff --git a/src/operator/LRN.cpp b/src/operator/LRN.cpp
index 5b7d663e7..36dde6712 100644
--- a/src/operator/LRN.cpp
+++ b/src/operator/LRN.cpp
@@ -31,7 +31,7 @@ Aidge::LRN_Op::LRN_Op(std::int32_t size)
 
 Aidge::LRN_Op::LRN_Op(const Aidge::LRN_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl){
         SET_IMPL_MACRO(LRN_Op, *this, op.backend());
diff --git a/src/operator/LeakyReLU.cpp b/src/operator/LeakyReLU.cpp
index dea73f310..b5e1a9d6a 100644
--- a/src/operator/LeakyReLU.cpp
+++ b/src/operator/LeakyReLU.cpp
@@ -20,7 +20,7 @@ const std::string Aidge::LeakyReLU_Op::Type = "LeakyReLU";
 
 Aidge::LeakyReLU_Op::LeakyReLU_Op(const Aidge::LeakyReLU_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl){
         SET_IMPL_MACRO(LeakyReLU_Op, *this, op.backend());
diff --git a/src/operator/MaxPooling.cpp b/src/operator/MaxPooling.cpp
index b0bd167dd..2ee361340 100644
--- a/src/operator/MaxPooling.cpp
+++ b/src/operator/MaxPooling.cpp
@@ -38,7 +38,7 @@ Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const std::array<Aidge::DimSize_t, DIM>
 template <Aidge::DimIdx_t DIM>
 Aidge::MaxPooling_Op<DIM>::MaxPooling_Op(const Aidge::MaxPooling_Op<DIM>& op)
     : OperatorTensor(op),
-    mAttributes(op.mAttributes)
+    mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(MaxPooling_Op<DIM>, *this, op.backend());
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index 4f1195045..c3ccc12ac 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -37,7 +37,7 @@ Aidge::Memorize_Op::Memorize_Op(const std::uint32_t endStep)
 
 Aidge::Memorize_Op::Memorize_Op(const Aidge::Memorize_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Memorize_Op, *this, op.backend());
diff --git a/src/operator/Mod.cpp b/src/operator/Mod.cpp
index 038a3c284..673c00225 100644
--- a/src/operator/Mod.cpp
+++ b/src/operator/Mod.cpp
@@ -30,7 +30,7 @@ Aidge::Mod_Op::Mod_Op()
 
 Aidge::Mod_Op::Mod_Op(const Aidge::Mod_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl){
         SET_IMPL_MACRO(Mod_Op, *this, op.backend());
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index e3a41bc7a..01b45e6d3 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -34,7 +34,7 @@ Aidge::Pop_Op::Pop_Op()
 
 Aidge::Pop_Op::Pop_Op(const Aidge::Pop_Op& op)
     : OperatorTensor(op),
-    mAttributes(op.mAttributes)
+    mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Pop_Op, *this, op.backend());
diff --git a/src/operator/Producer.cpp b/src/operator/Producer.cpp
index 0beaf91b3..505192661 100644
--- a/src/operator/Producer.cpp
+++ b/src/operator/Producer.cpp
@@ -59,7 +59,7 @@ Aidge::Producer_Op::Producer_Op(const std::shared_ptr<Aidge::Tensor> tensor, boo
  */
 Aidge::Producer_Op::Producer_Op(const Aidge::Producer_Op& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     *mOutputs[0] = *(op.getOutput(0));
     if (mOutputs[0]->getImpl() && Registrar<Producer_Op>::exists({mOutputs[0]->getImpl()->backend()})){
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index dfaa75a48..ec6e68fbe 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -37,7 +37,7 @@ Aidge::ReduceMean_Op::ReduceMean_Op(const std::vector<std::int32_t>& axes, bool
 
 Aidge::ReduceMean_Op::ReduceMean_Op(const Aidge::ReduceMean_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl){
         SET_IMPL_MACRO(ReduceMean_Op, *this, op.backend());
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 3df66f293..50768a098 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -37,7 +37,7 @@ Aidge::Reshape_Op::Reshape_Op(const std::vector<std::int64_t>& shape, bool allow
 
 Aidge::Reshape_Op::Reshape_Op(const Aidge::Reshape_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Reshape_Op, *this, op.backend());
diff --git a/src/operator/Scaling.cpp b/src/operator/Scaling.cpp
index 268a14cf9..218d25cbd 100644
--- a/src/operator/Scaling.cpp
+++ b/src/operator/Scaling.cpp
@@ -36,7 +36,7 @@ Aidge::Scaling_Op::Scaling_Op(float scalingFactor, std::size_t nbBits, bool isOu
 
 Aidge::Scaling_Op::Scaling_Op(const Aidge::Scaling_Op& op)
     : OperatorTensor(op),
-    mAttributes(op.mAttributes)
+    mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     Log::warn("Caution: The [Scaling] operator is now deprecated and should no longer be used. \nIt has been replaced by the MetaOperator [Quantizer] (located directly in aidge_quantization).");
     if (op.mImpl){
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index cc8380099..0927d3a6b 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -36,7 +36,7 @@ Aidge::Shape_Op::Shape_Op(const std::int64_t start, const std::int64_t end)
 
 Aidge::Shape_Op::Shape_Op(const Aidge::Shape_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Shape_Op, *this, op.backend());
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index de08ae6e9..60ec176c7 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -49,7 +49,7 @@ Aidge::Slice_Op::Slice_Op(const std::vector<std::int64_t>& starts,
 }
 
 Aidge::Slice_Op::Slice_Op(const Aidge::Slice_Op& op)
-    : OperatorTensor(op), mAttributes(op.mAttributes)
+    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Slice_Op, *this, op.backend());
diff --git a/src/operator/Softmax.cpp b/src/operator/Softmax.cpp
index ad894c5e5..df8a9e021 100644
--- a/src/operator/Softmax.cpp
+++ b/src/operator/Softmax.cpp
@@ -28,7 +28,7 @@ Aidge::Softmax_Op::Softmax_Op(std::int32_t axis)
 
 Aidge::Softmax_Op::Softmax_Op(const Aidge::Softmax_Op& op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl){
         SET_IMPL_MACRO(Softmax_Op, *this, op.backend());
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index f93a36606..4bdf01b69 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -37,7 +37,7 @@ Aidge::Split_Op::Split_Op(std::int8_t axis,
 
 Aidge::Split_Op::Split_Op(const Aidge::Split_Op &op)
     : OperatorTensor(op),
-    mAttributes(op.mAttributes)
+    mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Split_Op, *this, op.backend());
diff --git a/src/operator/Squeeze.cpp b/src/operator/Squeeze.cpp
index ea3452878..53b8e76ed 100644
--- a/src/operator/Squeeze.cpp
+++ b/src/operator/Squeeze.cpp
@@ -41,7 +41,7 @@ Squeeze_Op::Squeeze_Op(const std::vector<std::int8_t> &axes)
 
 Squeeze_Op::Squeeze_Op(const Squeeze_Op &op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Squeeze_Op, *this, op.backend());
diff --git a/src/operator/Stack.cpp b/src/operator/Stack.cpp
index b884baaa4..9e66fac64 100644
--- a/src/operator/Stack.cpp
+++ b/src/operator/Stack.cpp
@@ -35,7 +35,7 @@ StackOp::StackOp(std::uint32_t maxElements)
 }
 
 StackOp::StackOp(const Aidge::StackOp &op)
-    : OperatorTensor(op), mAttributes(op.mAttributes) {
+    : OperatorTensor(op), mAttributes(std::make_shared<Attributes_>(*op.mAttributes)) {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(StackOp, *this, op.backend());
     } else {
diff --git a/src/operator/TopK.cpp b/src/operator/TopK.cpp
index 660865100..4d574784f 100644
--- a/src/operator/TopK.cpp
+++ b/src/operator/TopK.cpp
@@ -45,7 +45,7 @@ TopK_Op::TopK_Op(
 
 TopK_Op::TopK_Op(const TopK_Op& op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (op.mImpl) {
         SET_IMPL_MACRO(TopK_Op, *this, op.backend());
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index f9e469476..55214e69c 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -42,7 +42,7 @@ Aidge::Transpose_Op::Transpose_Op(const std::vector<Aidge::DimSize_t> &outputDim
 
 Aidge::Transpose_Op::Transpose_Op(const Aidge::Transpose_Op& op)
     : OperatorTensor(op),
-    mAttributes(op.mAttributes)
+    mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Transpose_Op, *this, op.backend());
diff --git a/src/operator/Unfold.cpp b/src/operator/Unfold.cpp
index 888109240..5fac669b8 100644
--- a/src/operator/Unfold.cpp
+++ b/src/operator/Unfold.cpp
@@ -44,7 +44,7 @@ Aidge::Unfold_Op<DIM>::Unfold_Op(const std::array<Aidge::DimSize_t, DIM> &kernel
 template <Aidge::DimIdx_t DIM>
 Aidge::Unfold_Op<DIM>::Unfold_Op(const Aidge::Unfold_Op<DIM> &op)
     : OperatorTensor(op),
-        mAttributes(op.mAttributes)
+        mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Unfold_Op<DIM>, *this, op.backend());
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
index 679b420ec..b73b416c9 100644
--- a/src/operator/Unsqueeze.cpp
+++ b/src/operator/Unsqueeze.cpp
@@ -37,7 +37,7 @@ Unsqueeze_Op::Unsqueeze_Op(const std::vector<int8_t> &axes)
 
 Unsqueeze_Op::Unsqueeze_Op(const Unsqueeze_Op &op)
     : OperatorTensor(op),
-      mAttributes(op.mAttributes)
+      mAttributes(std::make_shared<Attributes_>(*op.mAttributes))
 {
     if (!op.backend().empty()) {
         SET_IMPL_MACRO(Unsqueeze_Op, *this, op.backend());
-- 
GitLab