diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index 5b25eb7975d439816dbf91cc95b462f217fd0227..8d6f2686d9010ac4ebed80cd04f74effe763e977 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -73,15 +73,26 @@ class test_operator_binding(unittest.TestCase):
         self.assertEqual(attrs.get_attr("b"), "test")
         self.assertEqual(attrs.has_attr("c"), True)
         self.assertEqual(attrs.get_attr("c"), [True, False, True])
-        self.assertEqual(attrs.dict().keys(), {"a", "b", "c"})
+        self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "mem", "impl"})
         self.assertEqual(attrs.has_attr("d"), False)
+        self.assertEqual(attrs.has_attr("mem.a"), True)
+        self.assertEqual(attrs.get_attr("mem.a"), 1)
+        self.assertEqual(attrs.has_attr("mem.data.b"), True)
+        self.assertEqual(attrs.get_attr("mem.data.b"), 1.0)
+        self.assertEqual(attrs.get_attr("mem").get_attr("data").get_attr("b"), 1.0)
+        self.assertEqual(attrs.has_attr("impl.c"), True)
+        self.assertEqual(attrs.get_attr("impl.c"), "test")
 
         # Add Python attributes
         attrs.add_attr("d", 18.56)
         self.assertEqual(attrs.get_attr("d"), 18.56)
         self.assertEqual(attrs.has_attr("d"), True)
-        self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "d"})
+        self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "d", "mem", "impl"})
         self.assertEqual(attrs.has_attr("e"), False)
+        attrs.add_attr("mem.data.c", 19.36)
+        self.assertEqual(attrs.get_attr("mem.data.c"), 19.36)
+        self.assertEqual(attrs.has_attr("mem.data.c"), True)
+        self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "d", "mem", "impl"})
 
         # Check that added Python attribute is accessible in C++
         # Return the value of an attribute named "d" of type float64 (double in C++)
@@ -89,6 +100,23 @@ class test_operator_binding(unittest.TestCase):
         attrs.d = 23.89
         self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)
 
+        op = aidge_core.GenericOperatorOp("any_type", 1,0,1)
+        with self.assertRaises(RuntimeError):
+            op.attr.something
+
+        op.attr.something = aidge_core.DynamicAttributes()
+        try:
+            self.assertEqual(str(op.attr), "AttrDict({'something': AttrDict({})})")
+        except Exception:
+            self.fail("op.attr.something raised Exception unexpectedly!")
+
+        op.attr.something.arg1 = 4
+        self.assertEqual(op.attr.something.arg1, 4)
+
+        # Auto-creation of the namespace another_thing (not enabled):
+        #op.attr.another_thing.arg = 44
+        #self.assertEqual(op.attr.another_thing.arg, 44)
+
     def test_forward_dims(self):
         in_dims=[25, 25]
         input = aidge_core.Producer(in_dims, name="In")
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index ff5a4fc4b8fe728efd517a74d3a9613a97e8809b..6454ed233c561e386199e4db40ca698ee9edad8a 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -132,10 +132,14 @@ static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
         {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int32_t>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Int16(
         {"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
-        {"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Int8(
         {"cpu", DataType::Int8}, Aidge::TensorImpl_cpu<int8_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt64(
+        {"cpu", DataType::UInt64}, Aidge::TensorImpl_cpu<uint64_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt32(
+        {"cpu", DataType::UInt32}, Aidge::TensorImpl_cpu<uint32_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
+        {"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_UInt8(
         {"cpu", DataType::UInt8}, Aidge::TensorImpl_cpu<uint8_t>::create);
 }  // namespace
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index ffee8c41a6e5adc13bad1d884e840986e7a868bb..108f1f2b4af12b3501dbb247d17052e42ebb70ed 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -57,7 +57,8 @@ class Tensor : public Data,
 
     /**
      * @brief Construct a new empty Tensor object.
-     * It has the features of an undefined scalar.
+     * It is considered undefined, i.e. dims cannot be forwarded from such a Tensor.
+     * @see undefined() for details.
      */
     Tensor(DataType dtype = DataType::Float32, DataFormat dformat = DataFormat::Default)
         : Data(Type),
@@ -65,7 +66,7 @@ class Tensor : public Data,
           mDataFormat(dformat),
           mDims(std::vector<DimSize_t>({})),
           mStrides({1}),
-          mSize(1)
+          mSize(0)
     {
         // ctor
     }
@@ -523,14 +524,30 @@ public:
     void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>());
 
     /**
-     * @brief Return if the Tensor object has at leastone element.
-     * @return true
-     * @return false
+     * @brief Return whether the Tensor object has a rank of 0, i.e. dimensions == {}.
+     * For defined Tensors, this implies that the Tensor is a scalar.
+     * For backward compatibility reasons, it is valid to call this predicate
+     * even on undefined Tensors, in which case it returns true.
+     * Hence, before testing the rank with this method, always check that the
+     * Tensor is not undefined().
+     * In particular, for operations such as forwardDims(), one should always
+     * use undefined() to test whether the Tensor dimensions have been defined.
+     * Once defined, empty() can be used to distinguish scalars from N-D Tensors.
+     * @return true if the rank is 0 or the Tensor is undefined
      */
     bool empty() const { return mDims.empty(); }
-    // bool newempty() const noexcept {
-    //     return mSize == 0;
-    // }
+
+    /**
+     * @brief Return whether the Tensor object is undefined.
+     * An undefined Tensor is a Tensor for which dimensions have not been
+     * defined yet. Hence, dimensions cannot be forwarded from an undefined Tensor.
+     * The only case where a Tensor is undefined is between default construction
+     * and the first call to resize().
+     * As soon as resize() has been called, the Tensor is irreversibly defined.
+     * @see empty() for distinguishing an undefined Tensor from a scalar.
+     * @return true if undefined
+     */
+    bool undefined() const { return mSize == 0; }
 
     /**
      * @brief Set each element of the tensor to zero.
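Reviewer note: a minimal Python sketch of the empty()/undefined() distinction documented above. It assumes a zero-argument Tensor constructor and a resize() binding are exposed to Python (only undefined() is newly bound later in this diff):

```python
import aidge_core

t = aidge_core.Tensor()   # default-constructed: undefined, mSize == 0
assert t.undefined()      # dims have not been defined yet
assert t.empty()          # the rank-0 check also returns True while undefined
t.resize([])              # assumed binding: irreversibly defines a scalar (mSize == 1)
assert not t.undefined() and t.empty()
```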
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 06ee4327e2f2d4df32c2decd73841bdf5f79a739..920829473d856b2a4c14fc0859abcd4c3b70277a 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -119,8 +119,8 @@ extern template class Aidge::AvgPooling_Op<4>;
 namespace {
 template <>
 const char *const EnumStrings<Aidge::AvgPoolingAttr>::data[] = {
-    "StrideDims",
-    "KernelDims"
+    "stride_dims",
+    "kernel_dims"
 };
 }
 
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index b5b64eb428d709e804dd9f6711530b348e0be747..08d1f6a88d394e34dd6e351f500429113a52c9fa 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -111,7 +111,7 @@ extern template std::shared_ptr<Aidge::Node> Aidge::BatchNorm<4>(const DimSize_t
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "Epsilon", "Momentum" };
+const char *const EnumStrings<Aidge::BatchNormAttr>::data[] = { "epsilon", "momentum" };
 }
 
 #endif //AIDGE_CORE_OPERATOR_BATCHNORM_H_
diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index 6911053932afff6675be4eb2c713d8d3cd34b462..291669b7c57c14a77ffa6b40fa2aefab8d281fc7 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -93,7 +93,7 @@ inline std::shared_ptr<Node> Cast(const DataType targetType, const std::string&
 
 namespace {
 template <>
-const char* const EnumStrings<Aidge::CastAttr>::data[] = { "TargetType" };
+const char* const EnumStrings<Aidge::CastAttr>::data[] = { "target_type" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CAST_H_ */
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 8341a93fe66d260ae3687170629b8759d0305a9c..ab14bf527dd9949f3bb2b6157619e58c7c7580ee 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -108,7 +108,7 @@ inline std::shared_ptr<Node> Concat(const IOIndex_t nbIn, const std::int32_t axi
 namespace {
     template <>
     const char* const EnumStrings<Aidge::ConcatAttr>::data[] = {
-        "Axis"
+        "axis"
     };
 }
 
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index 87ff5854b310ca472994bd6b68fd6ae58d31e806..e89c94f968ab89f43e6ef2d95a40a6f557cc41c7 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -178,9 +178,9 @@ extern template class Aidge::Conv_Op<2>;
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ConvAttr>::data[] = {
-    "StrideDims",
-    "DilationDims",
-    "KernelDims"
+    "stride_dims",
+    "dilation_dims",
+    "kernel_dims"
 };
 }
 
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index c8a83ff7de62a61e8125eac29d61c3938115cd09..1acf240bfcdd256953cd96b92e3622a265aafa0b 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -140,8 +140,8 @@ extern template class Aidge::ConvDepthWise_Op<2>;
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"StrideDims", "DilationDims",
-                                                          "KernelDims"};
+const char *const EnumStrings<Aidge::ConvDepthWiseAttr>::data[] = {"stride_dims", "dilation_dims",
+                                                          "kernel_dims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_CONVDEPTHWISE_H_ */
diff --git a/include/aidge/operator/Fold.hpp b/include/aidge/operator/Fold.hpp
index 28127f9efe437531a64d228f7ed9c168edc39eb6..caf904e870425c000687ccd95397c92744020eec 100644
--- a/include/aidge/operator/Fold.hpp
+++ b/include/aidge/operator/Fold.hpp
@@ -133,10 +133,10 @@ extern template class Aidge::Fold_Op<2>;
 namespace {
 template <>
 const char *const EnumStrings<Aidge::FoldAttr>::data[] = {
-    "OutputDims",
-    "StrideDims",
-    "DilationDims",
-    "KernelDims"
+    "output_dims",
+    "stride_dims",
+    "dilation_dims",
+    "kernel_dims"
 };
 }
 
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 3e9b780732fa9144f2e58bef854d1b42d063d0bf..5f3917e486e2e2188bfd23bd58a13b51d5fc7a59 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -114,7 +114,7 @@ inline std::shared_ptr<Node> Gather(std::int8_t axis = 0, const std::vector<int6
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"Axis", "Indices", "GatheredShape"};
+const char *const EnumStrings<Aidge::GatherAttr>::data[] = {"axis", "indices", "gathered_shape"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_GATHER_H_ */
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index 393798da2fc26b3ef3f5e4cfe54f69fd82174a5f..e07df59d888993cb33da9c20393d897ab9cf1804 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -76,7 +76,7 @@ public:
      * @return false Input has no dimensions or is a nullptr.
      */
     bool dimsForwarded() const override final {
-        return mInputs[0] ? (mInputs[0]->empty() ? false : mInputs[0]->dims() == mOutputs[0]->dims()) : false;
+        return mInputs[0] ? (mInputs[0]->undefined() ? false : mInputs[0]->dims() == mOutputs[0]->dims()) : false;
     }
 
 
diff --git a/include/aidge/operator/LeakyReLU.hpp b/include/aidge/operator/LeakyReLU.hpp
index 294e7ebb009ff184c9150d2aa18067a15deeba22..3057b99f70fa3693f7e434be29dcd40fb98d4bea 100644
--- a/include/aidge/operator/LeakyReLU.hpp
+++ b/include/aidge/operator/LeakyReLU.hpp
@@ -99,7 +99,7 @@ inline std::shared_ptr<Node> LeakyReLU(float negativeSlope = 0.0f, const std::st
 namespace {
 template <>
 const char* const EnumStrings<Aidge::LeakyReLUAttr>::data[]
-    = {"NegativeSlope"};
+    = {"negative_slope"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 082aa26bbdf1d55dcae29d1ffb2b9810db8b17d0..7e2c68681e645133812103a94e4c39ab9d1dc970 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -156,7 +156,7 @@ inline std::shared_ptr<Node> MaxPooling(
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"StrideDims", "KernelDims", "CeilMode"};
+const char *const EnumStrings<Aidge::MaxPoolingAttr>::data[] = {"stride_dims", "kernel_dims", "ceil_mode"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_MAXPOOLING_H_ */
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index d6af56f2faad18b9e39c793ea68e39eac4dd2f01..bb652e833ad06df37f55d3582afd0e66cc3e97c8 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -113,9 +113,9 @@ inline std::shared_ptr<Node> Memorize(const std::uint32_t endStep, const std::st
 namespace {
 template <>
 const char *const EnumStrings<Aidge::MemorizeAttr>::data[] = {
-    "ScheduleStep",
-    "ForwardStep",
-    "EndStep"
+    "schedule_step",
+    "forward_step",
+    "end_step"
 };
 }
 
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index adec17d07f39727a0c75d32fa24bcc624aa66e1a..f1e25b7a1f0ba3c07d656d4170a4b2d2bc045e5b 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -109,7 +109,7 @@ public:
      * The pointer itself is not changed, thus keeping the current connections.
      * @param inputIdx Index of the input to set.
      */
-    virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) = 0;
+    virtual void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const = 0;
     virtual std::shared_ptr<Data> getRawOutput(const IOIndex_t outputIdx) const = 0;
 
     std::shared_ptr<Hook> getHook(const std::string& hookName) {
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index 657a6d8ab6124b8919a3ac8fea5b6bfa6c4254b9..1097454fce62f645eb83c491498031738847e96c 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -62,7 +62,7 @@ public:
     std::shared_ptr<Data> getRawInput(const IOIndex_t inputIdx) const override final;
 
     // output management
-    void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) override;
+    void setOutput(const IOIndex_t outputIdx, const std::shared_ptr<Data>& data) const override;
     virtual const std::shared_ptr<Tensor>& getOutput(const IOIndex_t outputIdx) const;
     std::shared_ptr<Aidge::Data> getRawOutput(const Aidge::IOIndex_t outputIdx) const override final;
     ///////////////////////////////////////////////////
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index 5fd0f93986206e6cd958a85055159783eeb8bc8f..215fafb7fee10587dec38e77685d705f7c1bb980 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -139,7 +139,7 @@ extern template class Aidge::Pad_Op<2>;
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::PadAttr>::data[] = {"BeginEndBorders", "BorderType", "BorderValue"};
+const char *const EnumStrings<Aidge::PadAttr>::data[] = {"begin_end_borders", "border_type", "border_value"};
 
 template <>
 const char *const EnumStrings<Aidge::PadBorderType>::data[] = {"Constant", "Edge", "Reflect", "Wrap"};
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 575d56b455940ea98571110dbaa9a83de09fef37..fb3b32eeacf2e199df88b6bd0256cf6cbdaa1065 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -100,7 +100,7 @@ inline std::shared_ptr<Node> Pop(const std::string& name = "") {
 namespace {
 template <>
 const char *const EnumStrings<Aidge::PopAttr>::data[] = {
-    "ForwardStep"
+    "forward_step"
 };
 }
 
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 9e3bdd1ba2f601da27dea3a6a01131a0c8191eb4..1647c563d38ab4931cc3a0c2a4281555215f990e 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -115,7 +115,7 @@ public:
         // fmt::print("Basic Producer backward() function.\n");
     }
 
-    void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) override {
+    void setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) const override {
         if (mAttributes->template getAttr<ProdAttr::Constant>()) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer is constant, cannot update output.");
         }
@@ -160,7 +160,7 @@ std::shared_ptr<Node> addProducer(std::shared_ptr<Node>& otherNode, const IOInde
 namespace {
 template <>
 const char *const EnumStrings<Aidge::ProdAttr>::data[] = {
-    "Constant"
+    "constant"
 };
 }
 #endif /* AIDGE_CORE_OPERATOR_PRODUCER_H_ */
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 3fcf19ffd13645fb28b6efcfefaf8e347b148c89..000607c60e4e3c85671e70a941bd11f3427333dd 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -127,7 +127,7 @@ inline std::shared_ptr<Node> ReduceMean(const std::vector<std::int32_t> &axes,
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"Axes", "KeepDims"};
+const char *const EnumStrings<Aidge::ReduceMeanAttr>::data[] = {"axes", "keep_dims"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_REDUCEMEAN_H_ */
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 4ea0cca30089555ff7979f141f94e5c84f04ffa1..29a08c76c248018fff87a5f765a0b62cbd23b6b7 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -107,7 +107,7 @@ inline std::shared_ptr<Node> Reshape(const std::vector<std::int64_t>& shape = {}
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = { "Shape", "AllowZero" };
+const char *const EnumStrings<Aidge::ReshapeAttr>::data[] = { "shape", "allow_zero" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_RESHAPE_H_ */
diff --git a/include/aidge/operator/Scaling.hpp b/include/aidge/operator/Scaling.hpp
index 7d8e11b31546cd87a8d6b2d36e2929c9ef6df7a2..0683a26f6e9d8ef462c2af4693f372b43c33a144 100644
--- a/include/aidge/operator/Scaling.hpp
+++ b/include/aidge/operator/Scaling.hpp
@@ -105,7 +105,7 @@ inline std::shared_ptr<Node> Scaling(float scalingFactor = 1.0f,
 namespace {
 template <>
 const char* const EnumStrings<Aidge::ScalingAttr>::data[]
-    = {"ScalingFactor", "QuantizedNbBits", "IsOutputUnsigned"};
+    = {"scaling_factor", "quantized_nb_bits", "is_output_unsigned"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SCALING_H_ */
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 6d2d1b5e7c212fafa5ad6457d9e0a260e96b1c90..94f237726e79d8fe7824ff2c9b2f7640bbfc716f 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -105,7 +105,7 @@ inline std::shared_ptr<Node> Shape(const std::int64_t start = 0, const std::int6
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"Start", "End"};
+const char *const EnumStrings<Aidge::ShapeAttr>::data[] = {"start", "end"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SHAPE_H_ */
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index 7d425a0f3589e74b54ee0834fdc4291ea7f49bad..04a67fe98f7682737bff6df18f28d568ee33e093 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -115,7 +115,7 @@ inline std::shared_ptr<Node> Slice(const std::vector<std::int64_t>& starts = {},
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "Starts", "Ends", "Axes", "Steps" };
+const char *const EnumStrings<Aidge::SliceAttr>::data[] = { "starts", "ends", "axes", "steps" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_RELU_H_ */
diff --git a/include/aidge/operator/Softmax.hpp b/include/aidge/operator/Softmax.hpp
index 70f3a561ae5c9ba4720de8419bcd5aaf32a51e47..0b7a8e57193439872c6fcc2699b9f5e55c643961 100644
--- a/include/aidge/operator/Softmax.hpp
+++ b/include/aidge/operator/Softmax.hpp
@@ -92,7 +92,7 @@ inline std::shared_ptr<Node> Softmax(std::int32_t axis, const std::string& name
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"Axis"};
+const char *const EnumStrings<Aidge::SoftmaxAttr>::data[] = {"axis"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SOFTMAX_H_ */
diff --git a/include/aidge/operator/Split.hpp b/include/aidge/operator/Split.hpp
index 42baf66e6722c6f9a0d3f40f12d4f4685fcc6980..7bdec1579c8a8f46640de5caf42c01568d208059 100644
--- a/include/aidge/operator/Split.hpp
+++ b/include/aidge/operator/Split.hpp
@@ -34,20 +34,24 @@ enum class SplitAttr { Axis, Split };
 
 class Split_Op
     : public OperatorTensor,
-      public Registrable<Split_Op, std::string, std::shared_ptr<OperatorImpl>(const Split_Op &)>,
-      public StaticAttributes<SplitAttr, std::int8_t, std::vector<DimSize_t>> {
+      public Registrable<Split_Op, std::string, std::shared_ptr<OperatorImpl>(const Split_Op &)> {
 
 public:
     static const std::string Type;
 
+private:
+    using Attributes_ = StaticAttributes<SplitAttr, std::int8_t, std::vector<DimSize_t>>;
+    template <SplitAttr e> using attr = typename Attributes_::template attr<e>;
+    const std::shared_ptr<Attributes_> mAttributes;
+
+public:
     Split_Op() = delete;
 
-    using Attributes_ = StaticAttributes<SplitAttr,  std::int8_t, std::vector<DimSize_t>>;
-    template <SplitAttr e> using attr = typename Attributes_::template attr<e>;
     Split_Op( std::int8_t axis, DimSize_t nbOutputs, const std::vector<DimSize_t>& split)
         : OperatorTensor(Type, {InputCategory::Data, InputCategory::OptionalData}, nbOutputs),
-          Attributes_(attr<SplitAttr::Axis>(axis),
-                      attr<SplitAttr::Split>(split))
+          mAttributes(std::make_shared<Attributes_>(
+            attr<SplitAttr::Axis>(axis),
+            attr<SplitAttr::Split>(split)))
     {
         mImpl = std::make_shared<Split_OpImpl>(*this);
     }
@@ -60,7 +64,7 @@ public:
      */
     Split_Op(const Split_Op &op)
         : OperatorTensor(op),
-          Attributes_(op)
+          mAttributes(op.mAttributes)
     {
         if (!op.backend().empty()) {
             SET_IMPL_MACRO(Split_Op, *this, op.backend());
@@ -81,6 +85,10 @@ public:
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override;
 
+    inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
+    inline std::int8_t& axis() const { return mAttributes->template getAttr<SplitAttr::Axis>(); }
+    inline std::vector<DimSize_t>& split() const { return mAttributes->template getAttr<SplitAttr::Split>(); }
+
     static const std::vector<std::string> getInputsName(){
         return {"data_input", "split"};
     }
@@ -105,7 +113,7 @@ inline std::shared_ptr<Node> Split(DimSize_t nbOutput,
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::SplitAttr>::data[] = { "Axis", "Split" };
+const char *const EnumStrings<Aidge::SplitAttr>::data[] = { "axis", "split" };
 }
 
 #endif /* AIDGE_CORE_OPERATOR_SPLIT_H_ */
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 72096448ebf0e00d73e33bdab094ca7f0b7d0633..efd9e1792d530f45754809913a7c648d82c7985e 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -105,7 +105,7 @@ inline std::shared_ptr<Node> Transpose(const std::vector<DimSize_t> &outputDimsO
 
 namespace {
 template <>
-const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"OutputDimsOrder"};
+const char *const EnumStrings<Aidge::TransposeAttr>::data[] = {"output_dims_order"};
 }
 
 #endif /* AIDGE_CORE_OPERATOR_TRANSPOSE_H_ */
diff --git a/include/aidge/operator/Unfold.hpp b/include/aidge/operator/Unfold.hpp
index 169fbb05ebeff0e5d38eb9606133d6279cc31cd8..58cbcd2d756ad44ef2ec6a38d46909a114b187c2 100644
--- a/include/aidge/operator/Unfold.hpp
+++ b/include/aidge/operator/Unfold.hpp
@@ -137,9 +137,9 @@ extern template class Aidge::Unfold_Op<2>;
 namespace {
 template <>
 const char *const EnumStrings<Aidge::UnfoldAttr>::data[] = {
-    "StrideDims",
-    "DilationDims",
-    "KernelDims"
+    "stride_dims",
+    "dilation_dims",
+    "kernel_dims"
 };
 }
 
diff --git a/include/aidge/utils/Attributes.hpp b/include/aidge/utils/Attributes.hpp
index c1f6a8a7f704b4bd813983cb178d9e5acba5a5e1..7dce3d327d42de15dc2589788b4643742ed1a463 100644
--- a/include/aidge/utils/Attributes.hpp
+++ b/include/aidge/utils/Attributes.hpp
@@ -42,32 +42,6 @@ constexpr std::size_t size(T (&)[N]) { return N; }
 *  Attributes in the binding code.
 */
 class Attributes {
-protected:
-    /**
-     * @brief Convert snake_case to PascalCase.
-     * @param snakeCase string to convert.
-    */
-    static std::string snakeToPascal(const std::string& snakeCase);
-
-
-    /**
-     * @brief Convert PascalCase to snake_case.
-     * @param pascalCase string to convert.
-    */
-    static std::string pascalToSnake(const std::string& pascalCase);
-
-    /**
-     * @brief Check whether a given string is in PascalCase.
-     * @param str String to check.
-     */
-    static bool isPascalCase(const std::string& str);
-
-    /**
-     * @brief Check whether a given string is in snake_case.
-     * @param str String to check.
-     */
-    static bool isSnakeCase(const std::string& str);
-
 public:
     /**
      * @brief Check if the attribute exists.
diff --git a/include/aidge/utils/DynamicAttributes.hpp b/include/aidge/utils/DynamicAttributes.hpp
index c5054eb2fd2e8bfa5e7fca898f343ce630643dbd..cf7f048dbe5999f433277c46e4e3cb9798c43674 100644
--- a/include/aidge/utils/DynamicAttributes.hpp
+++ b/include/aidge/utils/DynamicAttributes.hpp
@@ -46,40 +46,35 @@ public:
      *  exist
      * \note at() throws if the Attribute does not exist, using find to test for Attribute existance
      */
-    template<class T> T& getAttr(const std::string& name)
+    template<class T> const T& getAttr(const std::string& name) const
     {
-        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
 #ifdef PYBIND
-        // If attribute does not exist in C++, it might have been created or modified in Python
-        auto it = mAttrs.find(name);
-        if (it == mAttrs.end()) {
-            auto itPy = mAttrsPy.find(pascalToSnake(name));
-            if (itPy != mAttrsPy.end()) {
-                // Insert the attribute back in C++
-                mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
+            // If attribute does not exist in C++, it might have been created or modified in Python
+            auto it = mAttrs.find(name);
+            if (it == mAttrs.end()) {
+                auto itPy = mAttrsPy.find(name);
+                if (itPy != mAttrsPy.end()) {
+                    // Insert the attribute back in C++
+                    mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
+                }
             }
-        }
 #endif
 
-        return future_std::any_cast<T&>(mAttrs.at(name));
-    }
-
-    template<class T> const T& getAttr(const std::string& name) const
-    {
-        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
-#ifdef PYBIND
-        // If attribute does not exist in C++, it might have been created or modified in Python
-        auto it = mAttrs.find(name);
-        if (it == mAttrs.end()) {
-            auto itPy = mAttrsPy.find(pascalToSnake(name));
-            if (itPy != mAttrsPy.end()) {
-                // Insert the attribute back in C++
-                mAttrs.emplace(std::make_pair(name, future_std::any(itPy->second.cast<T>())));
-            }
+            return future_std::any_cast<const T&>(mAttrs.at(name));
         }
-#endif
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            return future_std::any_cast<const DynamicAttributes&>(mAttrs.at(ns)).getAttr<T>(nsName);
+        }
+    }
 
-        return future_std::any_cast<const T&>(mAttrs.at(name));
+    template<class T> T& getAttr(const std::string& name) {
+        // Scott Meyers' solution to avoid code duplication
+        return const_cast<T&>(
+            static_cast<const DynamicAttributes&>(*this).getAttr<T>(name));
     }
 
     ///\brief Add a new Attribute, identified by its name. If it already exists, asserts.
@@ -88,17 +83,26 @@ public:
     ///\param value Attribute value
     template<class T> void addAttr(const std::string& name, const T& value)
     {
-        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
-        const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
-        AIDGE_ASSERT(res.second, "attribute already exists");
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            const auto& res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
+            AIDGE_ASSERT(res.second, "addAttr(): attribute \"{}\" already exists. Use setAttr() if this is expected.", name);
 
 #ifdef PYBIND
-        // We cannot handle Python object if the Python interpreter is not running
-        if (Py_IsInitialized()) {
-            // Keep a copy of the attribute in py::object that is updated everytime
-            mAttrsPy.emplace(std::make_pair(pascalToSnake(name), py::cast(value)));
-        }
+            // We cannot handle Python object if the Python interpreter is not running
+            if (Py_IsInitialized()) {
+                // Keep a copy of the attribute in py::object that is updated everytime
+                const auto& resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
+                AIDGE_ASSERT(resPy.second, "addAttr(): attribute \"{}\" already exists (added in Python). Use setAttr() if this is expected.", name);
+            }
 #endif
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            const auto& res = mAttrs.emplace(std::make_pair(ns, future_std::any(DynamicAttributes())));
+            future_std::any_cast<DynamicAttributes&>(res.first->second).addAttr(nsName, value);
+        }
     }
 
     ///\brief Set an Attribute value, identified by its name. If it already exists, its value (and type, if different) is changed.
@@ -107,56 +111,94 @@ public:
     ///\param value Attribute value
     template<class T> void setAttr(const std::string& name, const T& value)
     {
-        auto res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
-        if (!res.second)
-            res.first->second = future_std::any(value);
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            auto res = mAttrs.emplace(std::make_pair(name, future_std::any(value)));
+            if (!res.second)
+                res.first->second = future_std::any(value);
 
 #ifdef PYBIND
-        // We cannot handle Python object if the Python interpreter is not running
-        if (Py_IsInitialized()) {
-            // Keep a copy of the attribute in py::object that is updated everytime
-            auto resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
-            if (!resPy.second)
-                resPy.first->second = std::move(py::cast(value));
-        }
+            // We cannot handle Python object if the Python interpreter is not running
+            if (Py_IsInitialized()) {
+                // Keep a copy of the attribute in py::object that is updated everytime
+                auto resPy = mAttrsPy.emplace(std::make_pair(name, py::cast(value)));
+                if (!resPy.second)
+                    resPy.first->second = std::move(py::cast(value));
+            }
 #endif
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            auto res = mAttrs.emplace(std::make_pair(ns, future_std::any(DynamicAttributes())));
+            future_std::any_cast<DynamicAttributes&>(res.first->second).setAttr<T>(nsName, value);
+        }
     }
 
     void delAttr(const std::string& name) {
-        mAttrs.erase(name);
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            mAttrs.erase(name);
 #ifdef PYBIND
-        mAttrsPy.erase(name);
+            mAttrsPy.erase(name);
 #endif
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            future_std::any_cast<DynamicAttributes&>(mAttrs.at(ns)).delAttr(nsName);
+        }
     }
 
 #ifdef PYBIND
     void addAttrPy(const std::string& name, py::object&& value)
     {
-        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python.");
-        auto it = mAttrs.find(snakeToPascal(name));
-        AIDGE_ASSERT(it == mAttrs.end(), "attribute already exists");
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            auto it = mAttrs.find(name);
+            AIDGE_ASSERT(it == mAttrs.end(), "add_attr(): attribute \"{}\" already exists (added in C++). Use set_attr() if this is expected.", name);
+
+            const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
+            AIDGE_ASSERT(res.second, "add_attr(): attribute \"{}\" already exists. Use set_attr() if this is expected.", name);
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            const auto& res = mAttrs.emplace(std::make_pair(ns, DynamicAttributes()));
 
-        const auto& res = mAttrsPy.emplace(std::make_pair(name, value));
-        AIDGE_ASSERT(res.second, "attribute already exists");
+            future_std::any_cast<DynamicAttributes&>(res.first->second).addAttrPy(nsName, std::move(value));
+        }
     }
 
     void setAttrPy(const std::string& name, py::object&& value) override final
     {
-        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python.");
-        auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
-        if (!resPy.second)
-            resPy.first->second = std::move(value);
-
-        // Force getAttr() to take attribute value from mAttrsPy and update mAttrs
-        const std::string pascalName = snakeToPascal(name);
-        mAttrs.erase(pascalName);
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            auto resPy = mAttrsPy.emplace(std::make_pair(name, value));
+            if (!resPy.second)
+                resPy.first->second = std::move(value);
+
+            // Force getAttr() to take attribute value from mAttrsPy and update mAttrs
+            mAttrs.erase(name);
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            const auto& res = mAttrs.emplace(std::make_pair(ns, DynamicAttributes()));
+
+            future_std::any_cast<DynamicAttributes&>(res.first->second).setAttrPy(nsName, std::move(value));
+        }
     }
 
     py::dict dict() const override {
         py::dict attributes;
+        for (const auto& elt : mAttrs) {
+            if (elt.second.type() == typeid(DynamicAttributes)) {
+                attributes[elt.first.c_str()] = future_std::any_cast<const DynamicAttributes&>(elt.second).dict();
+            }
+        }
         for (const auto& elt : mAttrsPy) {
-            const std::string snakeName = pascalToSnake(elt.first);
-            attributes[snakeName.c_str()] = elt.second;
+            attributes[elt.first.c_str()] = elt.second;
         }
         return attributes;
     }
@@ -177,15 +219,46 @@ public:
     ///     Generic Attributes API
     //////////////////////////////////////
     bool hasAttr(const std::string& name) const override final {
-        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
-        return (mAttrs.find(name) != mAttrs.cend());
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+#ifdef PYBIND
+            return (mAttrs.find(name) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
+
+#else
+            return (mAttrs.find(name) != mAttrs.cend());
+#endif
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto it = mAttrs.find(ns);
+            if (it != mAttrs.cend()) {
+                const auto nsName = name.substr(dot + 1);
+                return future_std::any_cast<const DynamicAttributes&>(it->second).hasAttr(nsName);
+            }
+            else {
+                return false;
+            }
+        }
     }
 
 #ifdef PYBIND
     bool hasAttrPy(const std::string& name) const override final {
-        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python.");
-        // Attributes might have been created in Python, the second condition is necessary.
-        return (mAttrs.find(snakeToPascal(name)) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            // Attributes might have been created in Python, so the second condition is necessary.
+            return (mAttrs.find(name) != mAttrs.cend() || mAttrsPy.find(name) != mAttrsPy.cend());
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto it = mAttrs.find(ns);
+            if (it != mAttrs.cend()) {
+                const auto nsName = name.substr(dot + 1);
+                return future_std::any_cast<const DynamicAttributes&>(it->second).hasAttrPy(nsName);
+            }
+            else {
+                return false;
+            }
+        }
     }
 #endif
 
@@ -193,18 +266,26 @@ public:
         // In order to remain consistent between C++ and Python, with or without PyBind, the name of the type is:
         // - C-style for C++ created attributes
         // - Python-style for Python created attributes
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
 #ifdef PYBIND
-        // If attribute does not exist in C++, it might have been created in Python
-        auto it = mAttrs.find(name);
-        if (it == mAttrs.end()) {
-            auto itPy = mAttrsPy.find(name);
-            if (itPy != mAttrsPy.end()) {
-                return std::string(Py_TYPE(itPy->second.ptr())->tp_name);
+            // If attribute does not exist in C++, it might have been created in Python
+            auto it = mAttrs.find(name);
+            if (it == mAttrs.end()) {
+                auto itPy = mAttrsPy.find(name);
+                if (itPy != mAttrsPy.end()) {
+                    return std::string(Py_TYPE(itPy->second.ptr())->tp_name);
+                }
             }
-        }
 #endif
 
-        return mAttrs.at(name).type().name();
+            return mAttrs.at(name).type().name();
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            return future_std::any_cast<const DynamicAttributes&>(mAttrs.at(ns)).getAttrType(nsName);
+        }
     }
 
     std::set<std::string> getAttrsName() const override final {
@@ -226,7 +307,24 @@ public:
      * The strategy here is to keep a copy of each attribute in py::object that is updated everytime.
     */
     inline py::object getAttrPy(const std::string& name) const override final {
-        return mAttrsPy.at(name);
+        const auto dot = name.find('.');
+        if (dot == name.npos) {
+            auto itPy = mAttrsPy.find(name);
+            if (itPy == mAttrsPy.end()) {
+                // Attribute may be a namespace
+                auto it = mAttrs.find(name);
+                AIDGE_ASSERT(it != mAttrs.end() && it->second.type() == typeid(DynamicAttributes), "get_attr(): attribute \"{}\" not found", name);
+                return py::cast(future_std::any_cast<const DynamicAttributes&>(it->second));
+            }
+            else {
+                return itPy->second;
+            }
+        }
+        else {
+            const auto ns = name.substr(0, dot);
+            const auto nsName = name.substr(dot + 1);
+            return future_std::any_cast<const DynamicAttributes&>(mAttrs.at(ns)).getAttrPy(nsName);
+        }
     };
 #endif
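Reviewer note: a sketch of the new dotted-name namespace behavior through the Python bindings (add_attr/del_attr and the zero-argument constructor are bound further down in this diff; intermediate namespaces are created on the fly):

```python
import aidge_core

attrs = aidge_core.DynamicAttributes()
attrs.add_attr("mem.data.b", 1.0)   # auto-creates the "mem" and "mem.data" namespaces
assert attrs.has_attr("mem.data.b")
assert attrs.get_attr("mem").get_attr("data").get_attr("b") == 1.0
attrs.del_attr("mem.data.b")        # delAttr() recurses into the "mem" namespace
```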
 
diff --git a/include/aidge/utils/Log.hpp b/include/aidge/utils/Log.hpp
index a400f8046d07df4ff4493470737f5c4d42945db7..f198e83fbacdc2cceee1c947d0c17244d4c9953e 100644
--- a/include/aidge/utils/Log.hpp
+++ b/include/aidge/utils/Log.hpp
@@ -145,6 +145,14 @@ public:
         mConsoleLevel = level;
     }
 
+    /**
+     * Enable or disable colored output on the console.
+     * The initial value should be assumed to be true.
+    */
+    static void setConsoleColor(bool enabled) {
+        mConsoleColor = enabled;
+    }
+
     /**
      * Set the minimum log level saved in the log file.
     */
@@ -173,6 +181,7 @@ private:
     static void initFile(const std::string& fileName);
 
     static Level mConsoleLevel;
+    static bool mConsoleColor;
     static Level mFileLevel;
     static std::string mFileName;
     static std::unique_ptr<FILE, decltype(&std::fclose)> mFile;
diff --git a/include/aidge/utils/StaticAttributes.hpp b/include/aidge/utils/StaticAttributes.hpp
index 8fc88ff79c50751ba7b79662fc9fc430d4ed601d..3bb41b5bb0d9c2727d95a2656a1a2d5b96ff950b 100644
--- a/include/aidge/utils/StaticAttributes.hpp
+++ b/include/aidge/utils/StaticAttributes.hpp
@@ -180,7 +180,6 @@ public:
     //////////////////////////////////////
     // Runtime existance check with name
     bool hasAttr(const std::string& name) const override final {
-        AIDGE_ASSERT(isPascalCase(name), "Aidge standard requires PascalCase for C++ Attributes.");
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
             if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
                 return true;
@@ -192,10 +191,8 @@ public:
 
 #ifdef PYBIND
         bool hasAttrPy(const std::string& name) const override final {
-        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python, got '{}'.", name);
-        const std::string pascalName = snakeToPascal(name);
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            if (pascalName == EnumStrings<ATTRS_ENUM>::data[i]) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
                 return true;
             }
         }
@@ -234,7 +231,7 @@ public:
     static std::set<std::string> staticGetAttrsName() {
         std::set<std::string> attrsName;
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            attrsName.insert(pascalToSnake(std::string(EnumStrings<ATTRS_ENUM>::data[i])));
+            attrsName.insert(std::string(EnumStrings<ATTRS_ENUM>::data[i]));
         }
         return attrsName;
     }
@@ -244,10 +241,9 @@ public:
         if (name == "__dict__") {
             return py::none();
         }
-        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python, got '{}'.", name);
-        const std::string pascalName = snakeToPascal(name);
+
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            if (pascalName == EnumStrings<ATTRS_ENUM>::data[i]) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
                 // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
                 // Normal accessor would not work as we convert the tuple to a py::object which can be anything
                 return py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
@@ -264,10 +260,8 @@ public:
 
 
     void setAttrPy(const std::string& name, py::object&& value) override final{
-        AIDGE_ASSERT(isSnakeCase(name), "Aidge standard requires snake_case for Attributes with Python, got '{}'.", name);
-        const std::string pascalName = snakeToPascal(name);
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            if (pascalName == EnumStrings<ATTRS_ENUM>::data[i]) {
+            if (name == EnumStrings<ATTRS_ENUM>::data[i]) {
                 // Cannot update attribute using reference has it would require templating
                 // Use a dirty
                 auto tmpAttr = py::cast(mAttrs);
@@ -282,10 +276,9 @@ public:
     py::dict dict() const override {
         py::dict attributes;
         for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-            const std::string snakeName = pascalToSnake(EnumStrings<ATTRS_ENUM>::data[i]);
-                // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
-                // Normal accessor would not work as we convert the tuple to a py::object which can be anything
-            attributes[snakeName.c_str()] = py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
+            // https://github.com/pybind/pybind11/blob/f3e0602802c7840992c97f4960515777cad6a5c7/include/pybind11/pytypes.h#L1119-L1138
+            // Normal accessor would not work as we convert the tuple to a py::object which can be anything
+            attributes[EnumStrings<ATTRS_ENUM>::data[i]] = py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
         }
         return attributes;
     }
@@ -306,8 +299,7 @@ public:
     // AttrDict get_a() const {
     //     py::dict attributes_;
     //     for (std::size_t i = 0; i < size(EnumStrings<ATTRS_ENUM>::data); ++i) {
-    //         const std::string snakeName = pascalToSnake(std::string(EnumStrings<ATTRS_ENUM>::data[i]));
-    //         attributes_[snakeName.c_str()] = py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
+    //         attributes_[EnumStrings<ATTRS_ENUM>::data[i]] = py::detail::accessor_policies::tuple_item::get(py::cast(mAttrs), static_cast<py::size_t>(i));
     //     }
     //     return AttrDict(attributes_);
     // }
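Reviewer note: with EnumStrings now written directly in snake_case, dict() and the name-based lookups no longer convert between cases. A sketch, assuming `op.attr` exposes the StaticAttributes of an AvgPooling operator as in the test above:

```python
assert set(op.attr.dict().keys()) == {"stride_dims", "kernel_dims"}
```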
diff --git a/python_binding/data/pybind_Data.cpp b/python_binding/data/pybind_Data.cpp
index c6595360b17ee08eaa82d483987914adc67b60a8..1d4eae0776b66a16e6472a51661b22fe281e6f6b 100644
--- a/python_binding/data/pybind_Data.cpp
+++ b/python_binding/data/pybind_Data.cpp
@@ -17,20 +17,42 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Data(py::module& m){
-    // TODO : extend with more values !
-    py::enum_<DataType>(m, "dtype")
-    .value("float64", DataType::Float64)
-    .value("float32", DataType::Float32)
-    .value("float16", DataType::Float16)
-    .value("int8", DataType::Int8)
-    .value("int16", DataType::Int16)
-    .value("int32", DataType::Int32)
-    .value("int64", DataType::Int64)
-    .value("uint8", DataType::UInt8)
-    .value("uint16", DataType::UInt16)
-    .value("uint32", DataType::UInt32)
-    .value("uint64", DataType::UInt64)
-    ;
+    // Define enumeration names for Python as the lowercase dtype name.
+    // This yields enum names compatible with basic numpy dtype
+    // names such as: float32, float64, [u]int32, [u]int64, ...
+    auto python_enum_name = [](const DataType& dtype) {
+        auto str_lower = [](std::string& str) {
+            std::transform(str.begin(), str.end(), str.begin(),
+                           [](unsigned char c){
+                               return std::tolower(c);
+                           });
+        };
+        auto dtype_name = std::string(Aidge::format_as(dtype));
+        str_lower(dtype_name);
+        return dtype_name;
+    };
+    // Auto generate enumeration names from lowercase dtype strings
+    std::vector<std::string> enum_names;
+    for (auto dtype_str : EnumStrings<Aidge::DataType>::data) {
+        auto dtype = static_cast<DataType>(enum_names.size());
+        auto enum_name = python_enum_name(dtype);
+        enum_names.push_back(enum_name);
+    }
+
+    // Define python side enumeration aidge_core.dtype
+    auto e_dtype = py::enum_<DataType>(m, "dtype");
+
+    // Add enum value for each enum name
+    for (std::size_t idx = 0; idx < enum_names.size(); idx++) {
+        e_dtype.value(enum_names[idx].c_str(), static_cast<DataType>(idx));
+    }
+
+    // Define __str__() to return the bare enum name value, which allows
+    // direct comparison of, for instance, str(tensor.dtype())
+    // with str(nparray.dtype)
+    e_dtype.def("__str__", [enum_names](const DataType& dtype) {
+        return enum_names[static_cast<int>(dtype)];
+    }, py::prepend());
 
     py::class_<Data, std::shared_ptr<Data>>(m,"Data");
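Reviewer note: the generated enum then mirrors numpy's dtype naming, so string comparison works both ways (a sketch):

```python
import numpy as np
import aidge_core

assert str(aidge_core.dtype.float32) == "float32"
assert str(aidge_core.dtype.float32) == str(np.dtype(np.float32))
```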
 
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 60283039b709b783484ba0b1cf821497e5bb3a8f..1d0f02a507514153621fac3dcc9681989b6f94ff 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -93,6 +93,7 @@ void init_Tensor(py::module& m){
     .def("get_coord", &Tensor::getCoord)
     .def("get_idx", &Tensor::getIdx)
     .def_static("get_available_backends", &Tensor::getAvailableBackends)
+    .def("undefined", &Tensor::undefined)
     .def("__str__", [](Tensor& b) {
         if (b.empty()) {
             return std::string("{}");
diff --git a/python_binding/graph/pybind_Node.cpp b/python_binding/graph/pybind_Node.cpp
index 06c171214d5df261e5df832179a0fa69420aab7d..1fa552ce153b2b0f655ca9f38d1d80f62390184b 100644
--- a/python_binding/graph/pybind_Node.cpp
+++ b/python_binding/graph/pybind_Node.cpp
@@ -169,7 +169,11 @@ void init_Node(py::module& m) {
                 if (pybind11::isinstance<Connector>(arg)) {
                     // Convert Python object to C++ object adn push it ot vector
                     connectors.push_back(arg.cast<Connector>());
-                } else {
+                }
+                else if (arg.is(py::none())) {
+                    connectors.push_back(Connector());
+                }
+                else {
                     throw std::runtime_error("One of the arguments was not a Connector.");
                 }
             }
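Reviewer note: this lets optional inputs be skipped in the Connector-based functional API; a sketch, assuming `node` is a callable aidge_core.Node:

```python
x = aidge_core.Connector()
y = node(x, None)   # None is now accepted and maps to a default Connector
```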
diff --git a/python_binding/operator/pybind_Operator.cpp b/python_binding/operator/pybind_Operator.cpp
index 2b2f30f14931fd041bfb4ec1a712e5c9419fdf22..dbf71a3cad870d848fbc2f5f67c13d5347b38b89 100644
--- a/python_binding/operator/pybind_Operator.cpp
+++ b/python_binding/operator/pybind_Operator.cpp
@@ -37,7 +37,7 @@ void init_Operator(py::module& m){
     py::class_<Operator, std::shared_ptr<Operator>>(m, "Operator")
     .def("__repr__", &Operator::repr)
     .def("backend", &Operator::backend)
-    .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput), py::arg("outputIdx"), py::arg("data"))
+    .def("set_output", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setOutput, py::const_), py::arg("outputIdx"), py::arg("data"))
     .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
     .def("get_raw_output", &Operator::getRawOutput, py::arg("outputIdx"))
     .def("set_input", py::overload_cast<const IOIndex_t, const std::shared_ptr<Data>&>(&Operator::setInput), py::arg("inputIdx"), py::arg("data"))
diff --git a/python_binding/operator/pybind_OperatorTensor.cpp b/python_binding/operator/pybind_OperatorTensor.cpp
index 4d4541ab36468bc6b531e0242888dd70c5afc71f..8c515e321207605c20acc9e5b02271906c9707d1 100644
--- a/python_binding/operator/pybind_OperatorTensor.cpp
+++ b/python_binding/operator/pybind_OperatorTensor.cpp
@@ -28,7 +28,7 @@ void init_OperatorTensor(py::module& m){
     .def("get_output", &OperatorTensor::getOutput, py::arg("outputIdx"))
     .def("get_input", &OperatorTensor::getInput, py::arg("inputIdx"))
 
-    .def("set_output", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setOutput, py::arg("outputIdx"), py::arg("data"))
+    .def("set_output", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&) const) &OperatorTensor::setOutput, py::arg("outputIdx"), py::arg("data"))
     .def("set_input", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setInput, py::arg("outputIdx"), py::arg("data"))
     .def("forward_dims", &OperatorTensor::forwardDims, py::arg("allow_data_dependency") = false)
     .def("dims_forwarded", &OperatorTensor::dimsForwarded)
diff --git a/python_binding/operator/pybind_Split.cpp b/python_binding/operator/pybind_Split.cpp
index 6efc123864f21bf8ea02008b29fe59f31685f17c..f63a01f9815aa59cfbad0aea36f148899f44c9ea 100644
--- a/python_binding/operator/pybind_Split.cpp
+++ b/python_binding/operator/pybind_Split.cpp
@@ -21,14 +21,13 @@ namespace py = pybind11;
 namespace Aidge {
 
 void init_Split(py::module& m) {
-    py::class_<Split_Op, std::shared_ptr<Split_Op>, Attributes, OperatorTensor>(m, "SplitOp", py::multiple_inheritance())
+    py::class_<Split_Op, std::shared_ptr<Split_Op>, OperatorTensor>(m, "SplitOp", py::multiple_inheritance())
         .def(py::init<DimSize_t, std::int8_t, std::vector<DimSize_t>&>(),
                 py::arg("nb_outputs"),
                 py::arg("axis"),
                 py::arg("split"))
         .def_static("get_inputs_name", &Split_Op::getInputsName)
-        .def_static("get_outputs_name", &Split_Op::getOutputsName)
-        .def_static("attributes_name", &Split_Op::staticGetAttrsName);
+        .def_static("get_outputs_name", &Split_Op::getOutputsName);
 
     declare_registrable<Split_Op>(m, "SplitOp");
 
diff --git a/python_binding/utils/pybind_Attributes.cpp b/python_binding/utils/pybind_Attributes.cpp
index 7f5dde63c4835eb694d5fd2d571d7c9c1fd5a9ac..bc0ccb3f4053e37c186acd919fcadae9d5d19a40 100644
--- a/python_binding/utils/pybind_Attributes.cpp
+++ b/python_binding/utils/pybind_Attributes.cpp
@@ -21,14 +21,17 @@ namespace Aidge {
 
 DynamicAttributes test_DynamicAttributes_binding() {
     DynamicAttributes attrs;
-    attrs.addAttr<int>("A", 42);
-    attrs.addAttr<std::string>("B", "test");
-    attrs.addAttr<std::vector<bool>>("C", {true, false, true});
+    attrs.addAttr<int>("a", 42);
+    attrs.addAttr<std::string>("b", "test");
+    attrs.addAttr<std::vector<bool>>("c", {true, false, true});
+    attrs.addAttr("mem.a", 1);
+    attrs.addAttr("mem.data.b", 1.0f);
+    attrs.addAttr("impl.c", std::string("test"));
     return attrs;
 }
 
 double test_DynamicAttributes_binding_check(DynamicAttributes& attrs) {
-    return attrs.getAttr<double>("D");
+    return attrs.getAttr<double>("d");
 }
 
 void init_Attributes(py::module& m){
@@ -44,6 +47,7 @@ void init_Attributes(py::module& m){
 
 
     py::class_<DynamicAttributes, std::shared_ptr<DynamicAttributes>, Attributes>(m, "DynamicAttributes")
+    .def(py::init<>())
     .def("add_attr", &DynamicAttributes::addAttrPy, py::arg("name"), py::arg("value"))
     .def("del_attr", &DynamicAttributes::delAttr, py::arg("name"));
 
diff --git a/python_binding/utils/pybind_Log.cpp b/python_binding/utils/pybind_Log.cpp
index 7b5e7548b3126ed2ebfe3d9243248dc070c54076..f70a4bfab54ee14194ea04f96efa33a6b8e04201 100644
--- a/python_binding/utils/pybind_Log.cpp
+++ b/python_binding/utils/pybind_Log.cpp
@@ -82,6 +82,14 @@ void init_Log(py::module& m){
           :param level: Log level.
           :type level: Level
           )mydelimiter")
+    .def_static("set_console_color", &Log::setConsoleColor, py::arg("enabled"),
+          R"mydelimiter(
+          Enable or disable colored output on the console.
+          The initial value should be assumed to be True.
+
+          :param enabled: True to enable colors on the console, False to disable them.
+          :type enabled: bool
+          )mydelimiter")
     .def_static("set_file_level", &Log::setFileLevel, py::arg("level"),
           R"mydelimiter(
           Set the minimum log level saved in the log file.
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index de200300a99bb33180103608238855b2f5604145..d992703fedb224e6650ce2ad50317cda3bae650f 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -29,7 +29,7 @@ Aidge::OperatorImpl::OperatorImpl(const Operator& op, const std::string& backend
 Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     if (mOp.getRawInput(inputIdx)) {
         const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
-        if (!input->empty()) {
+        if (!input->undefined()) {
             // Known amount of data: requires the whole tensor by default
             return Elts_t::DataElts(input->size());
         }
@@ -46,7 +46,7 @@ Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inpu
 Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const {
     if (mOp.getRawInput(inputIdx)) {
         const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
-        if (!input->empty()) {
+        if (!input->undefined()) {
             // Known amount of data: protect the whole tensor by default
             return Elts_t::DataElts(input->size());
         }
@@ -67,7 +67,7 @@ Aidge::Elts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outp
                                                          const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
     if (mOp.getRawOutput(outputIdx)) {
         const auto output = std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx));
-        if (!output->empty()) {
+        if (!output->undefined()) {
             // Known amount of data: requires the whole tensor by default,
             // regardless of available data on inputs
             return Elts_t::DataElts(output->size());
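The blanket `empty()` → `undefined()` substitution here and in the operators below reflects the revised Tensor semantics: a default-constructed Tensor now reports `size() == 0`, while a scalar (`resize({})`) has zero dimensions but `size() == 1`, so "empty dims" no longer means "no data". A hedged sketch of the distinction, assuming `undefined()` stays true until a shape has been specified:

```cpp
#include "aidge/data/Tensor.hpp"

#include <cassert>

int main() {
    Aidge::Tensor t;           // no dims specified yet
    assert(t.undefined());     // assumption: true until a shape is set
    assert(t.size() == 0);     // matches the updated Test_Tensor expectation

    t.resize({});              // scalar: zero dimensions, size 1
    assert(!t.undefined());    // assumption: the shape is now specified
    assert(t.size() == 1);     // computeSize() sets mSize to 1 (see Tensor.cpp below)
    return 0;
}
```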
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 28fb90cebf8e387e69f1ec39c46a6a47c8a4d316..d1bf32594c9a79b6519613327c87370facc138ad 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -150,13 +150,12 @@ Aidge::Tensor::~Tensor() noexcept = default;
 
 void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
                            std::vector<Aidge::DimSize_t> strides) {
-    // TODO: scalar Tensor not handled
     if (dims.empty()) {  // scalar
         mDims = std::vector<DimSize_t>(0);
         mStrides = std::vector<DimSize_t>({1});
         mContiguous = true;
 
-        computeSize();
+        computeSize(); // will set mSize to 1
         if (mImpl) {
             mImpl->resize(mDims);
         }
@@ -214,7 +213,7 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
 
 std::string Aidge::Tensor::toString() const {
     AIDGE_ASSERT(
-        mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) ||
+        mImpl && (undefined() || (dims() == std::vector<DimSize_t>({0})) ||
                   (mImpl->hostPtr() != nullptr)),
         "tensor should have a valid host pointer");
 
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 9528e511be230cd8ac689876689f313782c9b0ab..4ec3334454034f20badb246b7030594bee0c0e48 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -152,7 +152,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
                 // Add-on to display the operator's output dimensions
                 std::string dims = "";
                 const auto op = std::dynamic_pointer_cast<OperatorTensor>(node_ptr->getOperator());
-                if (op && !op->getOutput(outputIdx)->dims().empty()) {
+                if (op && !op->getOutput(outputIdx)->undefined()) {
                   dims += " " + fmt::format("{}", op->getOutput(outputIdx)->dims());
                 }
 
@@ -198,7 +198,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
         // Add-on to display the operator's output dimensions
         std::string dims = "";
         const auto op = std::dynamic_pointer_cast<OperatorTensor>(output.first->getOperator());
-        if (op && op->getOutput(output.second) && !op->getOutput(output.second)->dims().empty()) {
+        if (op && op->getOutput(output.second) && !op->getOutput(output.second)->undefined()) {
           dims += " " + fmt::format("{}", op->getOutput(output.second)->dims());
         }
 
@@ -441,8 +441,8 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                 // Input is missing
                 AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i),
                   "Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
-                AIDGE_ASSERT(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(),
-                  "Empty input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
+                AIDGE_ASSERT(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->undefined(),
+                  "Undefined input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
             }
 
         }
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index 16306cd875cf67963250c8bcd391e9d9c00a26f5..4649a954a095d239dbe7de7bcbebf1025a3b22c6 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -63,14 +63,8 @@ bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
         return false;
     }
     const std::size_t nbDimsInput0 = getInput(0)->nbDims();
-    if (nbDimsInput0 == 0) {
-        return false;
-    }
-    AIDGE_ASSERT(nbDimsInput0 > 0, "First input in {} Operator is empty", type());
+    AIDGE_ASSERT(nbDimsInput0 > 0, "First input in {} Operator is a scalar", type());
     for (IOIndex_t i = 1; i < nbInputs(); ++i) {
-        if (getInput(i)->nbDims() == 0) {
-            return false;
-        }
         AIDGE_ASSERT(nbDimsInput0 == getInput(i)->nbDims(),
             "Input 0 and input {} in {} Operator have different number of dimensions: {} / {}",
             i, type(), nbDimsInput0, getInput(i)->nbDims());
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index c28a0587a755ef0a910ec5bfdeb9caa2f1edc216..cd3c4357434ec4b49b6ea05e0d2633adfee7bfd0 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -51,7 +51,7 @@ void Aidge::Gather_OpImpl::forward() {
 const std::string Aidge::Gather_Op::Type = "Gather";
 
 bool Aidge::Gather_Op::dimsForwarded() const {
-    if (getInput(1) && !getInput(1)->empty()) {
+    if (getInput(1) && !getInput(1)->undefined()) {
         // output dims are data dependent
         return false;
     }
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 5abfff9d8202003cbe5a76a94fab9d9ab5176b6e..207229b93b0ae362f42c1bae6fb1455b5a2b9d3d 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -69,7 +69,10 @@ bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
 
             mOutputs[0]->resize(outDims);
             return true;
+        } else {
+            AIDGE_ASSERT(false, "Incompatible scalar and N-D sizes.");
         }
+
     }
 
     return false;
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index adf79b5c69e991ad7979184c313448e4288a8ecb..88a182f2ae7d51abb059faa64058fb701a033b56 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -85,12 +85,12 @@ bool Aidge::Memorize_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated(false)) {
         // Only require one of the input to have dims defined
         // Otherwise, forwardDims() won't converge!
-        if (!(getInput(0)->empty())) {
+        if (!(getInput(0)->undefined())) {
             const auto expectedDims =  getInput(0)->dims();
             mOutputs[0]->resize(expectedDims);
             return true;
         }
-        else if (!(getInput(1)->empty())) {
+        else if (!(getInput(1)->undefined())) {
             const auto expectedDims =  getInput(1)->dims();
             mOutputs[0]->resize(expectedDims);
             return true;
@@ -105,7 +105,7 @@ bool Aidge::Memorize_Op::dimsForwarded() const {
     bool forwarded = true;
     // check outputs have been filled
     for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
-        forwarded &= !(getOutput(i)->empty());
+        forwarded &= !(getOutput(i)->undefined());
     }
     return forwarded;
 }
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index 5df90020a43ad6cffebcd2345c075837f11462b1..ff6fb9ce4b6b8596477dfdd1f43f8927e534459b 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -73,7 +73,7 @@ const std::shared_ptr<Aidge::Tensor>& Aidge::OperatorTensor::getInput(const Aidg
     return mInputs[inputIdx];
 }
 
-void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) {
+void Aidge::OperatorTensor::setOutput(const Aidge::IOIndex_t outputIdx, const std::shared_ptr<Aidge::Data>& data) const {
     AIDGE_ASSERT(data->type() == Tensor::Type, "{} Operator only accepts Tensors as inputs", type());
     AIDGE_ASSERT(outputIdx < nbOutputs(), "{} Operator has {} outputs", type(), nbOutputs());
     const auto& data_tensor = std::dynamic_pointer_cast<Tensor>(data);
@@ -123,7 +123,7 @@ bool Aidge::OperatorTensor::inputsAssociated(bool checkNonEmpty) const {
         }
 
         if (checkNonEmpty && getInput(i)) {
-            associated &= !(getInput(i)->empty());
+            associated &= !(getInput(i)->undefined());
         }
     }
 
@@ -152,13 +152,13 @@ bool Aidge::OperatorTensor::dimsForwarded() const {
     // check both inputs and outputs have been filled
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
         if (inputCategory(i) != InputCategory::OptionalData && inputCategory(i) != InputCategory::OptionalParam) {
-            forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
+            forwarded &= mInputs[i] ? !(getInput(i)->undefined()) : false;
         }
     }
     for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
         // If getOutput(i) is nullptr, ignore this output (it may be a dummy
         // output in a MetaOperator)
-        forwarded &= (getOutput(i)) ? !(getOutput(i)->empty()) : true;
+        forwarded &= (getOutput(i)) ? !(getOutput(i)->undefined()) : true;
     }
     return forwarded;
 }
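Const-qualifying `setOutput()` is legitimate because the method mutates the Tensor reached through `mOutputs`, not the operator's own state: with a `std::shared_ptr` member, const applies to the pointer, never to the pointee. A minimal illustration of that rule with a hypothetical `Holder` type:

```cpp
#include <cassert>
#include <memory>

struct Holder {
    std::shared_ptr<int> value = std::make_shared<int>(0);

    // const method: may not reseat `value`, but may modify *value,
    // because constness stops at the smart pointer itself.
    void set(int v) const { *value = v; }
};

int main() {
    const Holder h;
    h.set(42);
    assert(*h.value == 42);
    return 0;
}
```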
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 4184fc18abbc5490a1d6fbf7363fef817c7ecbc9..cc31eeea758853a4183569d58412c427bd32006c 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -31,7 +31,7 @@ void Aidge::Reshape_OpImpl::forward() {
 const std::string Aidge::Reshape_Op::Type = "Reshape";
 
 bool Aidge::Reshape_Op::dimsForwarded() const {
-    if (getInput(1) && !getInput(1)->empty()) {
+    if (getInput(1) && !getInput(1)->undefined()) {
         // output dims are data dependent
         return false;
     }
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index 966e1c3e032e64e75d3606fca022b84f9da8fbaf..0d407d4f97a17b8a89378bc83c1039423d9b2949 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -27,9 +27,9 @@ const std::string Aidge::Resize_Op::Type = "Resize";
 
 bool Aidge::Resize_Op::dimsForwarded() const {
     // in case of ROI add getInput(1) condition
-    if ((getInput(1) && !getInput(1)->empty())
-        || (getInput(2) && !getInput(2)->empty())
-        || (getInput(3) && !getInput(3)->empty())
+    if ((getInput(1) && !getInput(1)->undefined())
+        || (getInput(2) && !getInput(2)->undefined())
+        || (getInput(3) && !getInput(3)->undefined())
         )
     {
         // output dims are data dependent
@@ -44,9 +44,9 @@ bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
         AIDGE_ASSERT(getInput(0)->nbDims() == 4,
             "input tensor must have dimensions = 4 (batch, channel, height, width).");
 
-        const bool input1ROIPresent           = getInput(1) && !getInput(1)->empty();
-        const bool input2ScalesPresent        = getInput(2) && !getInput(2)->empty();
-        const bool input3SizesPresent         = getInput(3) && !getInput(3)->empty();
+        const bool input1ROIPresent           = getInput(1) && !getInput(1)->undefined();
+        const bool input2ScalesPresent        = getInput(2) && !getInput(2)->undefined();
+        const bool input3SizesPresent         = getInput(3) && !getInput(3)->undefined();
 
         AIDGE_ASSERT(input2ScalesPresent != input3SizesPresent, "Only one of scales and  sizes can be specified.")
 
@@ -118,4 +118,4 @@ void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     if(getInput(3)) {
         getInput(3)->setBackend(name, device);
     }
-}
\ No newline at end of file
+}
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index 8166712e1e5fd967bb9328e95ecf8c5388636ba7..39f5e2fe09b7ac750b8ea9d48d17fc2e97013c1a 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -34,17 +34,17 @@ const std::string Aidge::Shape_Op::Type = "Shape";
 
 bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
-        if (mAttributes->template getAttr<std::int64_t>("Start") < 0)
-            mAttributes->template getAttr<std::int64_t>("Start") += static_cast<std::int64_t>(getInput(0)->nbDims());
-        if (mAttributes->template getAttr<std::int64_t>("End") < 0)
-            mAttributes->template getAttr<std::int64_t>("End") += static_cast<std::int64_t>(getInput(0)->nbDims());
+        if (this->start() < 0)
+            this->start() += static_cast<std::int64_t>(getInput(0)->nbDims());
+        if (this->end() < 0)
+            this->end() += static_cast<std::int64_t>(getInput(0)->nbDims());
 
-        const auto start = mAttributes->template getAttr<std::int64_t>("Start");
-        const auto end = mAttributes->template getAttr<std::int64_t>("End");
+        const auto start = this->start();
+        const auto end = this->end();
         const auto nbDims = static_cast<std::int64_t>(getInput(0)->nbDims());
         const DimSize_t roi = end - start + 1;
 
-        AIDGE_ASSERT(start < nbDims && end < nbDims, "'Start' and 'End' must be < {}", nbDims);
+        AIDGE_ASSERT(start < nbDims && end < nbDims, "'start' and 'end' must be < {}", nbDims);
         AIDGE_ASSERT(roi> 1, "Unvalid ROI for Shape");
 
         mOutputs[0]->resize({roi});
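The string-keyed `getAttr<std::int64_t>` lookups are replaced by the generated `start()` / `end()` accessors, which return mutable references, so the in-place normalization still works and a typo becomes a compile error instead of a runtime lookup failure. A hypothetical condensed version of the accessor pattern (not the actual Aidge attribute machinery):

```cpp
#include <cstdint>
#include <iostream>

// Hypothetical sketch: the operator stores typed attributes and exposes
// named accessors returning mutable references.
class ShapeLikeOp {
public:
    ShapeLikeOp(std::int64_t start, std::int64_t end)
        : mStart(start), mEnd(end) {}

    std::int64_t& start() { return mStart; }
    std::int64_t& end()   { return mEnd; }

private:
    std::int64_t mStart;
    std::int64_t mEnd;
};

int main() {
    ShapeLikeOp op(-2, -1);
    const std::int64_t nbDims = 4;
    // Same normalization as in Shape_Op::forwardDims():
    if (op.start() < 0) op.start() += nbDims;
    if (op.end() < 0)   op.end()   += nbDims;
    std::cout << op.start() << " " << op.end() << '\n';  // prints "2 3"
    return 0;
}
```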
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 3cc2de686435a304326e2a4a60dad6c12a50349c..4fcfd587a9b3d8858b2e8a71605743c6702cb310 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -29,10 +29,10 @@
 const std::string Aidge::Slice_Op::Type = "Slice";
 
 bool Aidge::Slice_Op::dimsForwarded() const {
-    if ((getInput(1) && !getInput(1)->empty())
-        || (getInput(2) && !getInput(2)->empty())
-        || (getInput(3) && !getInput(3)->empty())
-        || (getInput(4) && !getInput(4)->empty()))
+    if ((getInput(1) && !getInput(1)->undefined())
+        || (getInput(2) && !getInput(2)->undefined())
+        || (getInput(3) && !getInput(3)->undefined())
+        || (getInput(4) && !getInput(4)->undefined()))
     {
         // output dims are data dependent
         return false;
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index a0cb049b19e9411daf65bbe2a10319c62b32c1b8..af7474d8a21db9ece237440b46ecf57db9b270b4 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -28,8 +28,8 @@
 
 void Aidge::Split_OpImpl::forward() {
     const Split_Op& op = dynamic_cast<const Split_Op&>(mOp);
-    const auto axis = op.template getAttr<std::int8_t>("Axis");
-    const auto splits = op.template getAttr<std::vector<DimSize_t>>("Split");
+    const auto axis = op.axis();
+    const auto splits = op.split();
     const auto dims = op.getInput(0)->dims();
 
     //Compute pre/post axis strides
@@ -55,7 +55,7 @@ void Aidge::Split_OpImpl::forward() {
 const std::string Aidge::Split_Op::Type = "Split";
 
 bool Aidge::Split_Op::dimsForwarded() const {
-    if ((getInput(1) && !getInput(1)->empty()))
+    if ((getInput(1) && !getInput(1)->undefined()))
     {
         // output dims are data dependent
         return false;
@@ -68,7 +68,7 @@ bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
     if (inputsAssociated()) {
         // Copy optional input #1, if present, to attribute Split
         if (getInput(1)) {
-            if (!this->template getAttr<SplitAttr::Split>().empty()) {
+            if (!this->split().empty()) {
                 Log::notice("Split_Op: ignoring non-empty Split attribute because input#1 takes precedence");
             }
 
@@ -78,21 +78,22 @@ bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
             }
 
             std::shared_ptr<Tensor> fallback;
-            this->template getAttr<SplitAttr::Split>().reserve(getInput(1)->size());
+            this->split().clear(); // if both are provided, the input takes precedence over the attribute
+            this->split().reserve(getInput(1)->size());
             const auto& splits = getInput(1)->refCastFrom(fallback, NativeType<DimSize_t>::type, "cpu");
             std::copy_n(static_cast<DimSize_t*>(splits.getImpl()->hostPtr()),
                         splits.size(),
-                        std::back_inserter(this->template getAttr<SplitAttr::Split>()));
+                        std::back_inserter(this->split()));
         }
 
         // Compute output dims
-        if (this->template getAttr<std::int8_t>("Axis") < 0)
-            this->template getAttr<std::int8_t>("Axis") += static_cast<std::int8_t>(getInput(0)->nbDims());
+        if (this->axis() < 0)
+            this->axis() += static_cast<std::int8_t>(getInput(0)->nbDims());
 
-        DimSize_t dimToSplit = getInput(0)->dims()[this->template getAttr<std::int8_t>("Axis")];
+        DimSize_t dimToSplit = getInput(0)->dims()[this->axis()];
         DimSize_t nbOutput = this->nbOutputs();
         // Fill Split attr if empty
-        if(this->template getAttr<SplitAttr::Split>().empty()) {
+        if(this->split().empty()) {
             // In case the input Split is not provided, divide the dimension of Axis into equal slices
             AIDGE_ASSERT(dimToSplit > nbOutput, "Split_Op: Output number {} musn't be bigger than dimension {}.", nbOutput, dimToSplit);
             DimSize_t baseSliceSize = dimToSplit / nbOutput;
@@ -100,12 +101,12 @@ bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
             DimSize_t remainder = dimToSplit % nbOutput;
 
             for (DimSize_t i = 0; i < static_cast<DimSize_t>(nbOutput -1); ++i) {
-                    this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize);
+                    this->split().push_back(baseSliceSize);
             }
-            this->template getAttr<SplitAttr::Split>().push_back(baseSliceSize + remainder);
+            this->split().push_back(baseSliceSize + remainder);
         }
 
-        const auto splits = this->template getAttr<SplitAttr::Split>();
+        const auto splits = this->split();
         AIDGE_ASSERT(splits.size() == nbOutput, "Split_Op: number of slices {} must be equal to number of outputs {}", splits, nbOutput);
         DimSize_t totalSplitSize = std::accumulate(splits.cbegin(), splits.cend(), 0);
         AIDGE_ASSERT(totalSplitSize == dimToSplit, "Split_Op: Total chunks size {} is different from dimension size {}.", totalSplitSize, dimToSplit);
@@ -113,7 +114,7 @@ bool Aidge::Split_Op::forwardDims(bool allowDataDependency) {
         std::vector<DimSize_t> outDims = getInput(0)->dims();
         for (std::size_t i = 0; i < nbOutput; ++i)
         {
-            outDims[this->template getAttr<std::int8_t>("Axis")] = this->template getAttr<SplitAttr::Split>()[i];
+            outDims[this->axis()] = this->split()[i];
             mOutputs[i]->resize(outDims);
         }
 
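When input #1 is absent and the Split attribute is empty, the axis dimension is divided into near-equal chunks, with the remainder folded into the last one. The computation in isolation:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Divide `dim` into `nbOutputs` chunks; the last chunk absorbs the remainder,
// mirroring the default-split logic in Split_Op::forwardDims().
std::vector<std::size_t> equalSplits(std::size_t dim, std::size_t nbOutputs) {
    const std::size_t base = dim / nbOutputs;
    const std::size_t remainder = dim % nbOutputs;
    std::vector<std::size_t> splits(nbOutputs, base);
    splits.back() += remainder;
    return splits;
}

int main() {
    for (std::size_t s : equalSplits(10, 3)) {
        std::cout << s << ' ';  // prints "3 3 4"
    }
    std::cout << '\n';
    return 0;
}
```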
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 69820a924105acc8bea817aecb90e0aa278fce06..30372e44f8f9641734fc1109bf03a64794383a3e 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -32,6 +32,7 @@ const std::string Aidge::Transpose_Op::Type = "Transpose";
 
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
+        AIDGE_ASSERT(!getInput(0)->empty(), "Transpose is not applicable to scalar tensors.");
         std::vector<DimSize_t> outputDims;
         for (std::size_t i = 0; i < outputDimsOrder().size(); ++i) {
             outputDims.push_back(getInput(0)->dims()[outputDimsOrder()[i]]);
diff --git a/src/utils/Attributes.cpp b/src/utils/Attributes.cpp
deleted file mode 100644
index e79db53a60a955e3502e070cda5818d3d7b6c922..0000000000000000000000000000000000000000
--- a/src/utils/Attributes.cpp
+++ /dev/null
@@ -1,96 +0,0 @@
-/********************************************************************************
- * Copyright (c) 2023 CEA-List
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0
- *
- ********************************************************************************/
-
-#include "aidge/utils/Attributes.hpp"
-
-#include <cctype>  // std::isdigit, std::islower, std::isupper, std::tolower,
-                   // std::toupper
-#include <string>
-
-std::string Aidge::Attributes::snakeToPascal(const std::string& snakeCase) {
-    std::string result;
-    bool to_upper = true; // Start with uppercase for PascalCase
-
-    for (char ch : snakeCase) {
-        if (ch == '_') {
-            to_upper = true; // Next character should be uppercase
-        } else {
-            if (to_upper) {
-                result += std::toupper(ch);
-                to_upper = false; // Reset flag after making a character uppercase
-            } else {
-                result += ch;
-            }
-        }
-    }
-    return result;
-}
-
-std::string Aidge::Attributes::pascalToSnake(const std::string& pascalCase) {
-    std::string result;
-
-    for (char ch : pascalCase) {
-        if (std::isupper(ch)) {
-            if (!result.empty()) {
-                result += '_';
-            }
-            result += std::tolower(ch);
-        } else {
-            result += ch;
-        }
-    }
-    return result;
-}
-
-bool Aidge::Attributes::isPascalCase(const std::string& str) {
-    if (str.empty() || !std::isupper(str[0])) {
-        return false;
-    }
-
-    bool expectUpper = false;
-    for (size_t i = 1; i < str.size(); ++i) {
-        if (str[i] == '_') {
-            return false;
-        }
-        if (std::isupper(str[i])) {
-            if (!expectUpper) {
-                return false;
-            }
-            expectUpper = false;
-        } else if (std::islower(str[i]) || std::isdigit(str[i])) {
-            expectUpper = true;
-        } else {
-            return false;
-        }
-    }
-    return true;
-}
-
-bool Aidge::Attributes::isSnakeCase(const std::string& str) {
-    if (str.empty()) {
-        return false;
-    }
-
-    bool lastCharWasUnderscore = false;
-    for (char ch : str) {
-        if (ch == '_') {
-            if (lastCharWasUnderscore) {
-                return false;
-            }
-            lastCharWasUnderscore = true;
-        } else if (!std::islower(ch) && !std::isdigit(ch)) {
-            return false;
-        } else {
-            lastCharWasUnderscore = false;
-        }
-    }
-    return true;
-}
diff --git a/src/utils/Log.cpp b/src/utils/Log.cpp
index 54af888caca8dc2c4b512515ff70663f9437dd45..ae8816e78b6fc7b8f2288b6873642f0729e195b6 100644
--- a/src/utils/Log.cpp
+++ b/src/utils/Log.cpp
@@ -28,6 +28,16 @@ Aidge::Log::Level Aidge::Log::mConsoleLevel = []() {
     }
     return Info;
 }();
+bool Aidge::Log::mConsoleColor = []() {
+    const char* logColor = std::getenv("AIDGE_LOG_COLOR");
+    if (logColor == nullptr)
+        return true;
+    auto logColorStr = std::string(logColor);
+    if (logColorStr == "off" || logColorStr == "OFF" ||
+        logColorStr == "0")
+        return false;
+    return true;
+}();
 Aidge::Log::Level Aidge::Log::mFileLevel = []() {
     const char* logLevel = std::getenv("AIDGE_LOGLEVEL_FILE");
     if (logLevel != nullptr) {
@@ -55,7 +65,8 @@ void Aidge::Log::log(Level level, const std::string& msg) {
         // Styles that were already applied to msg with fmt are kept also in 
         // the log file.
         const auto modifier
-            = (level == Debug) ? fmt::fg(fmt::color::gray)
+            = !mConsoleColor ? fmt::text_style()
+            : (level == Debug) ? fmt::fg(fmt::color::gray)
             : (level == Notice) ? fmt::fg(fmt::color::medium_purple)
             : (level == Warn) ? fmt::fg(fmt::color::orange)
             : (level == Error) ? fmt::fg(fmt::color::red)
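With this initializer, colors are on by default and can be turned off either from the environment (`AIDGE_LOG_COLOR=off`, `OFF`, or `0`) before the process starts, or at runtime through the new setter. A usage sketch, assuming the usual `Log::warn()` helper:

```cpp
#include "aidge/utils/Log.hpp"

int main() {
    // Runtime switch; equivalent to launching with AIDGE_LOG_COLOR=off.
    Aidge::Log::setConsoleColor(false);
    Aidge::Log::warn("plain, uncolored warning");

    Aidge::Log::setConsoleColor(true);
    Aidge::Log::warn("colored warning (orange)");
    return 0;
}
```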
diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp
index 62e90dcbd7c20548019afae1a04f84b3e1d4484a..a536f113f7d11eb8cec81b5fdbf57909bd70611d 100644
--- a/unit_tests/data/Test_Tensor.cpp
+++ b/unit_tests/data/Test_Tensor.cpp
@@ -14,13 +14,14 @@
 #include <cstdint>     // std::uint8_t, std::uint16_t, std::int32_t
 #include <numeric>     // std::accumulate, std::inner_product
 #include <functional>  // std::multiplies
-#include <random>      // std::random_device, std::mt19937,
+#include <random>      // std::mt19937,
                        // std::uniform_int_distribution, std::uniform_real_distribution
 #include <set>
 #include <string>
 #include <vector>
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 
 #include "aidge/backend/cpu/data/TensorImpl.hpp"
 #include "aidge/data/Data.hpp"
@@ -36,7 +37,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
         Tensor T_default{};
         REQUIRE((
             (T_default.dataType() == DataType::Float32) &&
-            (T_default.size() == 1) &&
+            (T_default.size() == 0) &&
             (T_default.dims() == std::vector<DimSize_t>({})) &&
             (T_default.strides() == std::vector<DimSize_t>({1})) &&
             (T_default.getImpl() == nullptr) &&
@@ -127,7 +128,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
         constexpr std::uint16_t NBTRIALS = 10;
 
         // Create random number generators
-        std::random_device rd;
+        auto rd = Catch::Generators::Detail::getSeed;
         std::mt19937 gen(rd());
         std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
         std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
@@ -169,7 +170,7 @@ TEST_CASE("[core/data] Tensor(getter/setter)", "[Tensor][Getter][Setter]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create random number generators
-    std::random_device rd;
+    auto rd = Catch::Generators::Detail::getSeed;
     std::mt19937 gen(rd());
     std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
     std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
@@ -261,7 +262,7 @@ TEST_CASE("[core/data] Tensor(other)", "[Tensor][extract][zeros][print]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create random number generators
-    std::random_device rd;
+    auto rd = Catch::Generators::Detail::getSeed;
     std::mt19937 gen(rd());
     std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
     std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
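Swapping `std::random_device` for Catch2's seed accessor makes the randomized trials reproducible: the generator is seeded from the seed Catch2 reports for the run, so a failing trial can be replayed with `--rng-seed`. The pattern in isolation:

```cpp
#include <catch2/catch_test_macros.hpp>
#include <catch2/generators/catch_generators_random.hpp>

#include <cstddef>
#include <random>

TEST_CASE("reproducible randomized trial", "[example]") {
    // Catch::Generators::Detail::getSeed() returns the run's RNG seed,
    // so a failure can be replayed with `--rng-seed <value>`.
    std::mt19937 gen(Catch::Generators::Detail::getSeed());
    std::uniform_int_distribution<std::size_t> dist(1, 10);
    REQUIRE(dist(gen) >= 1);
}
```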
diff --git a/unit_tests/graph/Test_GraphView.cpp b/unit_tests/graph/Test_GraphView.cpp
index 8e9f5a27e275a5ce56ddf57fa092ec96cec84711..d9289c4aa3f4b44ce72d772c9a39dd8e66ab09e7 100644
--- a/unit_tests/graph/Test_GraphView.cpp
+++ b/unit_tests/graph/Test_GraphView.cpp
@@ -17,6 +17,7 @@
 #include <string>
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
@@ -35,7 +36,7 @@ TEST_CASE("genRandomGraph", "[GraphView][randomGen]") {
     size_t nbUnicity = 0;
 
     for (int test = 0; test < nbTests; ++test) {
-        std::random_device rd;
+        auto rd = Catch::Generators::Detail::getSeed;
         const std::mt19937::result_type seed(rd());
 
         RandomGraph randGraph;
@@ -81,7 +82,7 @@ TEST_CASE("clone", "[GraphView][clone]") {
     const size_t nbTests = 100;
 
     for (int test = 0; test < nbTests; ++test) {
-        std::random_device rd;
+        auto rd = Catch::Generators::Detail::getSeed;
         const std::mt19937::result_type seed(rd());
 
         RandomGraph randGraph;
@@ -155,7 +156,7 @@ TEST_CASE("remove", "[GraphView][remove]") {
     size_t nbTested = 0;
 
     for (int test = 0; test < nbTests; ++test) {
-        std::random_device rd;
+        auto rd = Catch::Generators::Detail::getSeed;
         const std::mt19937::result_type seed(rd());
 
         RandomGraph randGraph;
diff --git a/unit_tests/operator/Test_ConcatImpl.cpp b/unit_tests/operator/Test_ConcatImpl.cpp
index 184c02d5208c99b903cf838784bb14fb65799111..fcdf3e8cc1bc07493cfa84608f200f9f334a29cc 100644
--- a/unit_tests/operator/Test_ConcatImpl.cpp
+++ b/unit_tests/operator/Test_ConcatImpl.cpp
@@ -18,6 +18,14 @@
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
+    SECTION("Concat scalar inputs") {
+        std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(2);
+        std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(4);
+        auto myConcat = Concat(2, 0);
+        myConcat->getOperator()->associateInput(0, input1);
+        myConcat->getOperator()->associateInput(1, input2);
+        REQUIRE_THROWS(myConcat->forward());
+    }
     SECTION("Concat 1D inputs") {
         std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array1D<int,2>{{ 2, 3 }});
         std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array1D<int,3>{{ 4, 5, 6 }});
@@ -140,4 +148,4 @@ TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
 
         REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
     }
-}
\ No newline at end of file
+}
diff --git a/unit_tests/operator/Test_Div_Op.cpp b/unit_tests/operator/Test_Div_Op.cpp
index d11f72474b0b70bf335dfee95d13a9b41cfe6efb..d35edec17cd9732119cfcaf249b5e7965a14ea65 100644
--- a/unit_tests/operator/Test_Div_Op.cpp
+++ b/unit_tests/operator/Test_Div_Op.cpp
@@ -10,9 +10,10 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 #include <cstddef>  // std::size_t
 #include <memory>
-#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <random>   // std::mt19937, std::uniform_int_distribution
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
@@ -24,7 +25,7 @@ TEST_CASE("[core/operator] Div_Op(forwardDims)", "[Div][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
-    std::random_device rd;
+    auto rd = Catch::Generators::Detail::getSeed;
     std::mt19937 gen(rd());
     std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
     std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
@@ -44,54 +45,54 @@ TEST_CASE("[core/operator] Div_Op(forwardDims)", "[Div][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
index d20f689aba55d8cbaef553388d4666fd6c1d7172..15c714b63c2b86e156b43cdaec390ddf60eb7353 100644
--- a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
+++ b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
@@ -10,9 +10,10 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 #include <cstddef> // std::size_t
 #include <memory>
-#include <random> // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <random> // std::mt19937, std::uniform_int_distribution
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
@@ -25,7 +26,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(forwardDims)",
           "[GlobalAveragePooling][forwardDims]") {
   constexpr std::uint16_t NB_TRIALS = 10;
   // Create a random number generator
-  std::random_device rd;
+  auto rd = Catch::Generators::Detail::getSeed;
   std::mt19937 gen(rd());
   std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
   std::uniform_int_distribution<std::size_t> inf3DimsDistribution(1, 2);
@@ -46,9 +47,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(forwardDims)",
   SECTION("Connected Inputs") {
     SECTION("empty tensor") {
       for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
-        const std::size_t nb_dims = 0;
-        std::vector<std::size_t> dims(nb_dims);
-        input_T->resize(dims);
+        // check that forwardDims() does not fail on an undefined input
         REQUIRE_NOTHROW(op->forwardDims());
       }
     }
diff --git a/unit_tests/operator/Test_MatMul_Op.cpp b/unit_tests/operator/Test_MatMul_Op.cpp
index bdd1de87c27351e943c59fa616c40dc4a0001abc..876c1ac764efe54475f6d45982acca76aacb7528 100644
--- a/unit_tests/operator/Test_MatMul_Op.cpp
+++ b/unit_tests/operator/Test_MatMul_Op.cpp
@@ -10,9 +10,10 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 #include <cstddef>  // std::size_t
 #include <memory>
-#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <random>   // std::mt19937, std::uniform_int_distribution
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
@@ -22,10 +23,10 @@
 namespace Aidge {
 TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") {
     // Create a random number generator
-    std::random_device rd;
+    auto rd = Catch::Generators::Detail::getSeed;
     std::mt19937 gen(rd());
     std::uniform_int_distribution<std::size_t> dist(1, 10);
 
     // Create MatMul Operator
     std::shared_ptr<Node> myMatMul = MatMul();
     auto op = std::static_pointer_cast<OperatorTensor>(myMatMul -> getOperator());
@@ -33,24 +35,24 @@ TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") {
     /** @todo Special case of scalar Tensor objects.
      * Not handled yet.
     */
-    // SECTION("0-D / 0-D") {
-    //     std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
-    //     T0->resize({});
-    //     op -> associateInput(0,T0);
+    SECTION("0-D / 0-D") {
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        T0->resize({});
+        op -> associateInput(0,T0);
 
-    //     // input_1 - right
-    //     std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
-    //     T1->resize({});
-    //     op -> associateInput(1,T1);
+        // input_1 - right
+        std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+        T1->resize({});
+        op -> associateInput(1,T1);
 
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims()).empty());
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims()).empty());
 
-    //     // input_1 - wrong
-    //     T1->resize({dist(gen)});
+        // input_1 - wrong
+        T1->resize({dist(gen)});
 
-    //     REQUIRE_THROWS(op->forwardDims());
-    // }
+        REQUIRE_THROWS(op->forwardDims());
+    }
 
     SECTION("1-D / N-D") {
         // input_0
@@ -193,4 +195,4 @@ TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") {
         REQUIRE_THROWS(op -> forwardDims());
     }
 }
-} // namespace Aidge
\ No newline at end of file
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Mul_Op.cpp b/unit_tests/operator/Test_Mul_Op.cpp
index f3f8fb9522943d0a9574cb80cfc228135a973890..bee90d725b25508abf90813532bb5ca754d8fb9a 100644
--- a/unit_tests/operator/Test_Mul_Op.cpp
+++ b/unit_tests/operator/Test_Mul_Op.cpp
@@ -10,9 +10,10 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 #include <cstddef>  // std::size_t
 #include <memory>
-#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <random>   // std::mt19937, std::uniform_int_distribution
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
@@ -24,7 +25,7 @@ TEST_CASE("[core/operator] Mul_Op(forwardDims)", "[Mul][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
-    std::random_device rd;
+    auto rd = Catch::Generators::Detail::getSeed;
     std::mt19937 gen(rd());
     std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
     std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
@@ -44,54 +45,54 @@ TEST_CASE("[core/operator] Mul_Op(forwardDims)", "[Mul][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_Pow_Op.cpp b/unit_tests/operator/Test_Pow_Op.cpp
index 4a8d242a355cda58c7b36914efdb1304220f713a..274f7c00b9bd3c3ba57f0463dbe3a1b727141013 100644
--- a/unit_tests/operator/Test_Pow_Op.cpp
+++ b/unit_tests/operator/Test_Pow_Op.cpp
@@ -10,9 +10,10 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 #include <cstddef>  // std::size_t
 #include <memory>
-#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <random>   // std::mt19937, std::uniform_int_distribution
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
@@ -24,7 +25,7 @@ TEST_CASE("[core/operator] Pow_Op(forwardDims)", "[Pow][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
-    std::random_device rd;
+    auto rd = Catch::Generators::Detail::getSeed;
     std::mt19937 gen(rd());
     std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
     std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
@@ -44,54 +45,54 @@ TEST_CASE("[core/operator] Pow_Op(forwardDims)", "[Pow][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_Sub_Op.cpp b/unit_tests/operator/Test_Sub_Op.cpp
index 329f3da798854ddff3d1c1393d60c57ef180c70a..110cbbfe68b723a2a670abe590ca5392881170f3 100644
--- a/unit_tests/operator/Test_Sub_Op.cpp
+++ b/unit_tests/operator/Test_Sub_Op.cpp
@@ -10,9 +10,10 @@
  ********************************************************************************/
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 #include <cstddef>  // std::size_t
 #include <memory>
-#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <random>   // std::mt19937, std::uniform_int_distribution
 #include <vector>
 
 #include "aidge/data/Tensor.hpp"
@@ -24,7 +25,7 @@ TEST_CASE("[core/operator] Sub_Op(forwardDims)", "[Sub][forwardDims]") {
     constexpr std::uint16_t NBTRIALS = 10;
 
     // Create a random number generator
-    std::random_device rd;
+    auto rd = Catch::Generators::Detail::getSeed;
     std::mt19937 gen(rd());
     std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
     std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
@@ -44,54 +45,54 @@ TEST_CASE("[core/operator] Sub_Op(forwardDims)", "[Sub][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_TransposeImpl.cpp b/unit_tests/operator/Test_TransposeImpl.cpp
index 8b6eafc70b7eefec6e1ccab9d0cfcde1eb4a09d5..18f0d68d87ac1ee66ffb1f24c4c130f9b020d56e 100644
--- a/unit_tests/operator/Test_TransposeImpl.cpp
+++ b/unit_tests/operator/Test_TransposeImpl.cpp
@@ -18,6 +18,16 @@
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Transpose(forward)") {
+    SECTION("Scalar Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(2);
+        std::shared_ptr<Tensor> output = std::make_shared<Tensor>(2);
+        std::shared_ptr<Node> myTranspose = Transpose({});
+        auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator());
+        op->associateInput(0,input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        REQUIRE_THROWS(myTranspose->forward());
+    }
     SECTION("3D Tensor") {
         std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array3D<float,2,3,4> {
             {
@@ -120,4 +130,4 @@ TEST_CASE("[cpu/operator] Transpose(forward)") {
 
         REQUIRE(*(op->getOutput(0)) == *output);
     }
-}
\ No newline at end of file
+}
diff --git a/unit_tests/scheduler/Test_Scheduler.cpp b/unit_tests/scheduler/Test_Scheduler.cpp
index ceaa5e301c820ef54970a0e76004ad3467ae66da..3c3026ff09222f9623d886f9c4574bf23667cd9a 100644
--- a/unit_tests/scheduler/Test_Scheduler.cpp
+++ b/unit_tests/scheduler/Test_Scheduler.cpp
@@ -17,6 +17,7 @@
 #include <string>
 
 #include <catch2/catch_test_macros.hpp>
+#include <catch2/generators/catch_generators_random.hpp>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/data/Tensor.hpp"
@@ -35,7 +36,7 @@ TEST_CASE("randomScheduling", "[Scheduler][randomGen]") {
   std::uniform_int_distribution<std::size_t> nb_nodes_dist(100, 500);
 
   for (int test = 0; test < nbTests; ++test) {
-    std::random_device rd;
+    auto rd = Catch::Generators::Detail::getSeed;
     const std::mt19937::result_type seed(rd());
     std::mt19937 gen(rd());
 
diff --git a/unit_tests/utils/Test_DynamicAttributes.cpp b/unit_tests/utils/Test_DynamicAttributes.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b8a1264b3ad954e776a5ae4c47f03cd0c3fb82c9
--- /dev/null
+++ b/unit_tests/utils/Test_DynamicAttributes.cpp
@@ -0,0 +1,62 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+
+#include <string>
+#include <vector>
+
+#include "aidge/utils/DynamicAttributes.hpp"
+
+using namespace Aidge;
+
+TEST_CASE("[core/attributes] DynamicAttributes") {
+    SECTION("TestAttr") {
+        DynamicAttributes attrs;
+        attrs.addAttr("a", 1);
+        attrs.addAttr("b", 1.0f);
+        attrs.addAttr("c", std::string("test"));
+        attrs.addAttr<std::vector<bool>>("d", {false, true, false});
+
+        REQUIRE(attrs.getAttr<int>("a") == 1);
+        REQUIRE(attrs.getAttr<float>("b") == 1.0f);
+        REQUIRE(attrs.getAttr<std::string>("c") == "test");
+        REQUIRE(attrs.getAttr<std::vector<bool>>("d") == std::vector<bool>{{false, true, false}});
+
+        attrs.addAttr("e", DynamicAttributes());
+        attrs.getAttr<DynamicAttributes>("e").addAttr("e1", 1.0f);
+        attrs.getAttr<DynamicAttributes>("e").addAttr("e2", std::string("test"));
+
+        REQUIRE(attrs.getAttr<DynamicAttributes>("e").getAttr<float>("e1") == 1.0f);
+        REQUIRE(attrs.getAttr<DynamicAttributes>("e").getAttr<std::string>("e2") == "test");
+    }
+
+    SECTION("TestAttrNS") {
+        DynamicAttributes attrs;
+        attrs.addAttr("mem.a", 1);
+        attrs.addAttr("mem.data.b", 1.0f);
+        attrs.addAttr("impl.c", std::string("test"));
+        attrs.addAttr<std::vector<bool>>("d", {false, true, false});
+
+        REQUIRE(attrs.getAttr<int>("mem.a") == 1);
+        REQUIRE(attrs.getAttr<float>("mem.data.b") == 1.0f);
+        REQUIRE(attrs.getAttr<std::string>("impl.c") == "test");
+        REQUIRE(attrs.getAttr<std::vector<bool>>("d") == std::vector<bool>{{false, true, false}});
+
+        attrs.getAttr<DynamicAttributes>("mem.data").addAttr("e", 2.0f);
+        attrs.getAttr<DynamicAttributes>("impl").addAttr("f", std::string("test2"));
+        REQUIRE(attrs.getAttr<float>("mem.data.e") == 2.0f);
+        REQUIRE(attrs.getAttr<std::string>("impl.f") == "test2");
+
+        REQUIRE(attrs.getAttr<DynamicAttributes>("mem.data").getAttr<float>("b") == 1.0f);
+        REQUIRE(attrs.getAttr<DynamicAttributes>("impl").getAttr<std::string>("c") == "test");
+    }
+}
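The dotted names above suggest a simple recursive resolution: split on the first `.` and descend into nested attribute namespaces. A purely hypothetical sketch of that strategy (not the actual Aidge implementation), with string-valued leaves for brevity:

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <string>

// Hypothetical: a string-keyed tree where interior nodes act as namespaces.
struct AttrNode {
    std::map<std::string, std::unique_ptr<AttrNode>> children;
    std::string value;  // leaf payload (string-valued for brevity)

    AttrNode& resolve(const std::string& name) {
        const auto dot = name.find('.');
        const std::string head = name.substr(0, dot);
        auto& child = children[head];
        if (!child) child = std::make_unique<AttrNode>();  // create on first use
        if (dot == std::string::npos) return *child;       // leaf reached
        return child->resolve(name.substr(dot + 1));       // descend namespace
    }
};

int main() {
    AttrNode root;
    root.resolve("mem.data.b").value = "1.0";
    std::cout << root.resolve("mem.data.b").value << '\n';  // prints "1.0"
    return 0;
}
```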