diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index ff5a4fc4b8fe728efd517a74d3a9613a97e8809b..6454ed233c561e386199e4db40ca698ee9edad8a 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -132,10 +132,14 @@ static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
         {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int32_t>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Int16(
         {"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
-        {"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Int8(
         {"cpu", DataType::Int8}, Aidge::TensorImpl_cpu<int8_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt64(
+        {"cpu", DataType::UInt64}, Aidge::TensorImpl_cpu<uint64_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt32(
+        {"cpu", DataType::UInt32}, Aidge::TensorImpl_cpu<uint32_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
+        {"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_UInt8(
         {"cpu", DataType::UInt8}, Aidge::TensorImpl_cpu<uint8_t>::create);
 }  // namespace
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index ffee8c41a6e5adc13bad1d884e840986e7a868bb..108f1f2b4af12b3501dbb247d17052e42ebb70ed 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -57,7 +57,8 @@ class Tensor : public Data,
 
     /**
      * @brief Construct a new empty Tensor object.
-     * It has the features of an undefined scalar.
+     * It is considered undefined, i.e. dims can't be forwarded from such a Tensor.
+     * @ref undefined() method for details
      */
     Tensor(DataType dtype = DataType::Float32, DataFormat dformat = DataFormat::Default)
         : Data(Type),
@@ -65,7 +66,7 @@ class Tensor : public Data,
           mDataFormat(dformat),
           mDims(std::vector<DimSize_t>({})),
           mStrides({1}),
-          mSize(1)
+          mSize(0)
     {
         // ctor
     }
@@ -523,14 +524,30 @@ public:
     void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>());
 
     /**
-     * @brief Return if the Tensor object has at leastone element.
-     * @return true
-     * @return false
+     * @brief Return whether the Tensor object has a rank of 0, i.e. dimensions == {}.
+     * For defined Tensors, this implies that the Tensor is scalar.
+     * For backward compatibility reasons, it is valid to call this predicate
+     * even on undefined Tensors, in which case it returns true.
+     * Hence before testing the rank with this method, always check that the
+     * Tensor is not undefined().
+     * In particular for operations such as forwardDims(), one should always
+     * use undefined() to test whether the Tensor dimensions have been defined.
+     * In this case empty() can be used to distinguish scalars from N-D Tensors.
+     * @return true if rank is 0 or the tensor is undefined
      */
     bool empty() const { return mDims.empty(); }
-    // bool newempty() const noexcept {
-    //     return mSize == 0;
-    // }
+
+    /**
+     * @brief Returns whether the Tensor object is undefined.
+     * An undefined Tensor is equivalent to a tensor for which dimensions have not
+     * been defined yet. Hence, dimensions forwarding can't be done from undefined tensors.
+     * The only case where a tensor is undefined is after the default constructor
+     * and before any call to resize().
+     * Also, as soon as the resize() method has been called, the Tensor is irreversibly defined.
+     * @ref empty() method for distinguishing an undefined Tensor from a scalar
+     * @return true if undefined
+     */
+    bool undefined() const { return mSize == 0; }
 
     /**
      * @brief Set each element of the tensor to zero.
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index 393798da2fc26b3ef3f5e4cfe54f69fd82174a5f..e07df59d888993cb33da9c20393d897ab9cf1804 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -76,7 +76,7 @@ public:
      * @return false Input has no dimensions or is a nullptr.
      */
     bool dimsForwarded() const override final {
-        return mInputs[0] ? (mInputs[0]->empty() ? false : mInputs[0]->dims() == mOutputs[0]->dims()) : false;
+        return mInputs[0] ? (mInputs[0]->undefined() ? false : mInputs[0]->dims() == mOutputs[0]->dims()) : false;
     }
 
 
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 60283039b709b783484ba0b1cf821497e5bb3a8f..1d0f02a507514153621fac3dcc9681989b6f94ff 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -93,6 +93,7 @@ void init_Tensor(py::module& m){
     .def("get_coord", &Tensor::getCoord)
     .def("get_idx", &Tensor::getIdx)
     .def_static("get_available_backends", &Tensor::getAvailableBackends)
+    .def("undefined", &Tensor::undefined)
     .def("__str__", [](Tensor& b) {
         if (b.empty()) {
             return std::string("{}");
diff --git a/src/backend/OperatorImpl.cpp b/src/backend/OperatorImpl.cpp
index de200300a99bb33180103608238855b2f5604145..d992703fedb224e6650ce2ad50317cda3bae650f 100644
--- a/src/backend/OperatorImpl.cpp
+++ b/src/backend/OperatorImpl.cpp
@@ -29,7 +29,7 @@ Aidge::OperatorImpl::OperatorImpl(const Operator& op, const std::string& backend
 Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inputIdx) const {
     if (mOp.getRawInput(inputIdx)) {
         const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
-        if (!input->empty()) {
+        if (!input->undefined()) {
             // Known amount of data: requires the whole tensor by default
             return Elts_t::DataElts(input->size());
         }
@@ -46,7 +46,7 @@ Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredData(const Aidge::IOIndex_t inpu
 Aidge::Elts_t Aidge::OperatorImpl::getNbRequiredProtected(IOIndex_t inputIdx) const {
     if (mOp.getRawInput(inputIdx)) {
         const auto input = std::static_pointer_cast<Tensor>(mOp.getRawInput(inputIdx));
-        if (!input->empty()) {
+        if (!input->undefined()) {
             // Known amount of data: protect the whole tensor by default
             return Elts_t::DataElts(input->size());
         }
@@ -67,7 +67,7 @@ Aidge::Elts_t Aidge::OperatorImpl::getRequiredMemory(const Aidge::IOIndex_t outp
                                                          const std::vector<Aidge::DimSize_t> &/*inputsSize*/) const {
     if (mOp.getRawOutput(outputIdx)) {
         const auto output = std::static_pointer_cast<Tensor>(mOp.getRawOutput(outputIdx));
-        if (!output->empty()) {
+        if (!output->undefined()) {
             // Known amount of data: requires the whole tensor by default,
             // regardless of available data on inputs
             return Elts_t::DataElts(output->size());
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index 28fb90cebf8e387e69f1ec39c46a6a47c8a4d316..d1bf32594c9a79b6519613327c87370facc138ad 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -150,13 +150,12 @@ Aidge::Tensor::~Tensor() noexcept = default;
 
 void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
                            std::vector<Aidge::DimSize_t> strides) {
-    // TODO: scalar Tensor not handled
     if (dims.empty()) {  // scalar
         mDims = std::vector<DimSize_t>(0);
         mStrides = std::vector<DimSize_t>({1});
         mContiguous = true;
 
-        computeSize();
+        computeSize(); // will set mSize to 1
         if (mImpl) {
             mImpl->resize(mDims);
         }
@@ -214,7 +213,7 @@ void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t>& dims,
 
 std::string Aidge::Tensor::toString() const {
     AIDGE_ASSERT(
-        mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) ||
+        mImpl && (undefined() || (dims() == std::vector<DimSize_t>({0})) ||
                   (mImpl->hostPtr() != nullptr)),
         "tensor should have a valid host pointer");
 
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 9528e511be230cd8ac689876689f313782c9b0ab..4ec3334454034f20badb246b7030594bee0c0e48 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -152,7 +152,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
                 // Add-on to display the operator's output dimensions
                 std::string dims = "";
                 const auto op = std::dynamic_pointer_cast<OperatorTensor>(node_ptr->getOperator());
-                if (op && !op->getOutput(outputIdx)->dims().empty()) {
+                if (op && !op->getOutput(outputIdx)->undefined()) {
                   dims += " " + fmt::format("{}", op->getOutput(outputIdx)->dims());
                 }
 
@@ -198,7 +198,7 @@ void Aidge::GraphView::save(const std::string& path, bool verbose, bool showProd
         // Add-on to display the operator's output dimensions
         std::string dims = "";
         const auto op = std::dynamic_pointer_cast<OperatorTensor>(output.first->getOperator());
-        if (op && op->getOutput(output.second) && !op->getOutput(output.second)->dims().empty()) {
+        if (op && op->getOutput(output.second) && !op->getOutput(output.second)->undefined()) {
           dims += " " + fmt::format("{}", op->getOutput(output.second)->dims());
         }
 
@@ -441,8 +441,8 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
                 // Input is missing
                 AIDGE_ASSERT(nodePtr->getOperator()->getRawInput(i),
                   "Missing input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
-                AIDGE_ASSERT(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->empty(),
-                  "Empty input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
+                AIDGE_ASSERT(!std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->undefined(),
+                  "Undefined input#{} for node {} ({})", i, nodePtr->name(), nodePtr->type());
             }
 
         }
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index bf4bbb85be606fc857bf8d771b9ce211ca8e858e..58ee7355e3aec0b86991d8df22753953304dd7c9 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -64,14 +64,8 @@ bool Aidge::Concat_Op::forwardDims(bool /*allowDataDependency*/) {
         return false;
     }
     const std::size_t nbDimsInput0 = getInput(0)->nbDims();
-    if (nbDimsInput0 == 0) {
-        return false;
-    }
-    AIDGE_ASSERT(nbDimsInput0 > 0, "First input in {} Operator is empty", type());
+    AIDGE_ASSERT(nbDimsInput0 > 0, "First input in {} Operator is scalar", type());
     for (IOIndex_t i = 1; i < nbInputs(); ++i) {
-        if (getInput(i)->nbDims() == 0) {
-            return false;
-        }
         AIDGE_ASSERT(nbDimsInput0 == getInput(i)->nbDims(),
             "Input 0 and input {} in {} Operator have different number of dimensions: {} / {}",
             i, type(), nbDimsInput0, getInput(i)->nbDims());
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index c28a0587a755ef0a910ec5bfdeb9caa2f1edc216..cd3c4357434ec4b49b6ea05e0d2633adfee7bfd0 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -51,7 +51,7 @@ void Aidge::Gather_OpImpl::forward() {
 const std::string Aidge::Gather_Op::Type = "Gather";
 
 bool Aidge::Gather_Op::dimsForwarded() const {
-    if (getInput(1) && !getInput(1)->empty()) {
+    if (getInput(1) && !getInput(1)->undefined()) {
         // output dims are data dependent
         return false;
     }
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 5abfff9d8202003cbe5a76a94fab9d9ab5176b6e..207229b93b0ae362f42c1bae6fb1455b5a2b9d3d 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -69,7 +69,10 @@ bool Aidge::MatMul_Op::forwardDims(bool /*allowDataDependency*/) {
 
             mOutputs[0]->resize(outDims);
             return true;
+        } else {
+            AIDGE_ASSERT(false, "Incompatible scalar and N-D sizes.");
         }
+
     }
 
     return false;
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index adf79b5c69e991ad7979184c313448e4288a8ecb..88a182f2ae7d51abb059faa64058fb701a033b56 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -85,12 +85,12 @@ bool Aidge::Memorize_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated(false)) {
         // Only require one of the input to have dims defined
         // Otherwise, forwardDims() won't converge!
-        if (!(getInput(0)->empty())) {
+        if (!(getInput(0)->undefined())) {
             const auto expectedDims =  getInput(0)->dims();
             mOutputs[0]->resize(expectedDims);
             return true;
         }
-        else if (!(getInput(1)->empty())) {
+        else if (!(getInput(1)->undefined())) {
             const auto expectedDims =  getInput(1)->dims();
             mOutputs[0]->resize(expectedDims);
             return true;
@@ -105,7 +105,7 @@ bool Aidge::Memorize_Op::dimsForwarded() const {
     bool forwarded = true;
     // check outputs have been filled
     for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
-        forwarded &= !(getOutput(i)->empty());
+        forwarded &= !(getOutput(i)->undefined());
     }
     return forwarded;
 }
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index 91beab94787d14c93eb4c6434a40afe485fa6bff..ff6fb9ce4b6b8596477dfdd1f43f8927e534459b 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -123,7 +123,7 @@ bool Aidge::OperatorTensor::inputsAssociated(bool checkNonEmpty) const {
         }
 
         if (checkNonEmpty && getInput(i)) {
-            associated &= !(getInput(i)->empty());
+            associated &= !(getInput(i)->undefined());
         }
     }
 
@@ -152,13 +152,13 @@ bool Aidge::OperatorTensor::dimsForwarded() const {
     // check both inputs and outputs have been filled
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
         if (inputCategory(i) != InputCategory::OptionalData && inputCategory(i) != InputCategory::OptionalParam) {
-            forwarded &= mInputs[i] ? !(getInput(i)->empty()) : false;
+            forwarded &= mInputs[i] ? !(getInput(i)->undefined()) : false;
         }
     }
     for (IOIndex_t i = 0; i < nbOutputs(); ++i) {
         // If getOutput(i) is nullptr, ignore this output (it may be a dummy
         // output in a MetaOperator)
-        forwarded &= (getOutput(i)) ? !(getOutput(i)->empty()) : true;
+        forwarded &= (getOutput(i)) ? !(getOutput(i)->undefined()) : true;
     }
     return forwarded;
 }
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 4184fc18abbc5490a1d6fbf7363fef817c7ecbc9..cc31eeea758853a4183569d58412c427bd32006c 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -31,7 +31,7 @@ void Aidge::Reshape_OpImpl::forward() {
 const std::string Aidge::Reshape_Op::Type = "Reshape";
 
 bool Aidge::Reshape_Op::dimsForwarded() const {
-    if (getInput(1) && !getInput(1)->empty()) {
+    if (getInput(1) && !getInput(1)->undefined()) {
         // output dims are data dependent
         return false;
     }
diff --git a/src/operator/Resize.cpp b/src/operator/Resize.cpp
index 966e1c3e032e64e75d3606fca022b84f9da8fbaf..0d407d4f97a17b8a89378bc83c1039423d9b2949 100644
--- a/src/operator/Resize.cpp
+++ b/src/operator/Resize.cpp
@@ -27,9 +27,9 @@ const std::string Aidge::Resize_Op::Type = "Resize";
 
 bool Aidge::Resize_Op::dimsForwarded() const {
     // in case of ROI add getInput(1) condition
-    if ((getInput(1) && !getInput(1)->empty())
-        || (getInput(2) && !getInput(2)->empty())
-        || (getInput(3) && !getInput(3)->empty())
+    if ((getInput(1) && !getInput(1)->undefined())
+        || (getInput(2) && !getInput(2)->undefined())
+        || (getInput(3) && !getInput(3)->undefined())
         )
     {
         // output dims are data dependent
@@ -44,9 +44,9 @@ bool Aidge::Resize_Op::forwardDims(bool allowDataDependency) {
         AIDGE_ASSERT(getInput(0)->nbDims() == 4,
             "input tensor must have dimensions = 4 (batch, channel, height, width).");
 
-        const bool input1ROIPresent           = getInput(1) && !getInput(1)->empty();
-        const bool input2ScalesPresent        = getInput(2) && !getInput(2)->empty();
-        const bool input3SizesPresent         = getInput(3) && !getInput(3)->empty();
+        const bool input1ROIPresent           = getInput(1) && !getInput(1)->undefined();
+        const bool input2ScalesPresent        = getInput(2) && !getInput(2)->undefined();
+        const bool input3SizesPresent         = getInput(3) && !getInput(3)->undefined();
 
         AIDGE_ASSERT(input2ScalesPresent != input3SizesPresent, "Only one of scales and  sizes can be specified.")
 
@@ -118,4 +118,4 @@ void Aidge::Resize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t de
     if(getInput(3)) {
         getInput(3)->setBackend(name, device);
     }
-}
\ No newline at end of file
+}
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 3cc2de686435a304326e2a4a60dad6c12a50349c..4fcfd587a9b3d8858b2e8a71605743c6702cb310 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -29,10 +29,10 @@
 const std::string Aidge::Slice_Op::Type = "Slice";
 
 bool Aidge::Slice_Op::dimsForwarded() const {
-    if ((getInput(1) && !getInput(1)->empty())
-        || (getInput(2) && !getInput(2)->empty())
-        || (getInput(3) && !getInput(3)->empty())
-        || (getInput(4) && !getInput(4)->empty()))
+    if ((getInput(1) && !getInput(1)->undefined())
+        || (getInput(2) && !getInput(2)->undefined())
+        || (getInput(3) && !getInput(3)->undefined())
+        || (getInput(4) && !getInput(4)->undefined()))
     {
         // output dims are data dependent
         return false;
diff --git a/src/operator/Split.cpp b/src/operator/Split.cpp
index a0cb049b19e9411daf65bbe2a10319c62b32c1b8..31de75e410afef98843d1f59a1221ecd3ba91832 100644
--- a/src/operator/Split.cpp
+++ b/src/operator/Split.cpp
@@ -55,7 +55,7 @@ void Aidge::Split_OpImpl::forward() {
 const std::string Aidge::Split_Op::Type = "Split";
 
 bool Aidge::Split_Op::dimsForwarded() const {
-    if ((getInput(1) && !getInput(1)->empty()))
+    if ((getInput(1) && !getInput(1)->undefined()))
     {
         // output dims are data dependent
         return false;
diff --git a/src/operator/Transpose.cpp b/src/operator/Transpose.cpp
index 69820a924105acc8bea817aecb90e0aa278fce06..30372e44f8f9641734fc1109bf03a64794383a3e 100644
--- a/src/operator/Transpose.cpp
+++ b/src/operator/Transpose.cpp
@@ -32,6 +32,7 @@ const std::string Aidge::Transpose_Op::Type = "Transpose";
 
 bool Aidge::Transpose_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
+        AIDGE_ASSERT(!getInput(0)->empty(), "Not applicable on scalars.");
         std::vector<DimSize_t> outputDims;
         for (std::size_t i = 0; i < outputDimsOrder().size(); ++i) {
             outputDims.push_back(getInput(0)->dims()[outputDimsOrder()[i]]);
diff --git a/unit_tests/data/Test_Tensor.cpp b/unit_tests/data/Test_Tensor.cpp
index 62e90dcbd7c20548019afae1a04f84b3e1d4484a..98d3193ffc56f78bb5274ebe0795a4d67d163d27 100644
--- a/unit_tests/data/Test_Tensor.cpp
+++ b/unit_tests/data/Test_Tensor.cpp
@@ -36,7 +36,7 @@ TEST_CASE("[core/data] Tensor(Construction)", "[Tensor][Constructor]") {
         Tensor T_default{};
         REQUIRE((
             (T_default.dataType() == DataType::Float32) &&
-            (T_default.size() == 1) &&
+            (T_default.size() == 0) &&
             (T_default.dims() == std::vector<DimSize_t>({})) &&
             (T_default.strides() == std::vector<DimSize_t>({1})) &&
             (T_default.getImpl() == nullptr) &&
diff --git a/unit_tests/operator/Test_ConcatImpl.cpp b/unit_tests/operator/Test_ConcatImpl.cpp
index 184c02d5208c99b903cf838784bb14fb65799111..fcdf3e8cc1bc07493cfa84608f200f9f334a29cc 100644
--- a/unit_tests/operator/Test_ConcatImpl.cpp
+++ b/unit_tests/operator/Test_ConcatImpl.cpp
@@ -18,6 +18,14 @@
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
+    SECTION("Concat scalar inputs") {
+        std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(2);
+        std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(4);
+        auto myConcat = Concat(2, 0);
+        myConcat->getOperator()->associateInput(0, input1);
+        myConcat->getOperator()->associateInput(1, input2);
+        REQUIRE_THROWS(myConcat->forward());
+    }
     SECTION("Concat 1D inputs") {
         std::shared_ptr<Tensor> input1 = std::make_shared<Tensor>(Array1D<int,2>{{ 2, 3 }});
         std::shared_ptr<Tensor> input2 = std::make_shared<Tensor>(Array1D<int,3>{{ 4, 5, 6 }});
@@ -140,4 +148,4 @@ TEST_CASE("[cpu/operator] Concat(forward)", "[Concat][CPU]") {
 
         REQUIRE(*std::static_pointer_cast<OperatorTensor>(myConcat->getOperator())->getOutput(0) == *expectedOutput);
     }
-}
\ No newline at end of file
+}
diff --git a/unit_tests/operator/Test_Div_Op.cpp b/unit_tests/operator/Test_Div_Op.cpp
index d11f72474b0b70bf335dfee95d13a9b41cfe6efb..cef7bc53ef7e9247e59077028a728e9b1bb2aebe 100644
--- a/unit_tests/operator/Test_Div_Op.cpp
+++ b/unit_tests/operator/Test_Div_Op.cpp
@@ -44,54 +44,54 @@ TEST_CASE("[core/operator] Div_Op(forwardDims)", "[Div][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
index d20f689aba55d8cbaef553388d4666fd6c1d7172..1d99fc7a513d0fa183fac786acee253a7cc97f10 100644
--- a/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
+++ b/unit_tests/operator/Test_GlobalAveragePooling_Op.cpp
@@ -46,9 +46,7 @@ TEST_CASE("[core/operator] GlobalAveragePooling_Op(forwardDims)",
   SECTION("Connected Inputs") {
     SECTION("empty tensor") {
       for (uint16_t trial = 0; trial < NB_TRIALS; ++trial) {
-        const std::size_t nb_dims = 0;
-        std::vector<std::size_t> dims(nb_dims);
-        input_T->resize(dims);
+        // Test that on undefined input it does not fail
         REQUIRE_NOTHROW(op->forwardDims());
       }
     }
diff --git a/unit_tests/operator/Test_MatMul_Op.cpp b/unit_tests/operator/Test_MatMul_Op.cpp
index bdd1de87c27351e943c59fa616c40dc4a0001abc..102a4ab4ec7d6262bdcc05f0c56605dfcb6af89a 100644
--- a/unit_tests/operator/Test_MatMul_Op.cpp
+++ b/unit_tests/operator/Test_MatMul_Op.cpp
@@ -33,24 +33,24 @@ TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") {
     /** @todo Special case of scalar Tensor objects.
      * Not handled yet.
     */
-    // SECTION("0-D / 0-D") {
-    //     std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
-    //     T0->resize({});
-    //     op -> associateInput(0,T0);
+    SECTION("0-D / 0-D") {
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        T0->resize({});
+        op -> associateInput(0,T0);
 
-    //     // input_1 - right
-    //     std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
-    //     T1->resize({});
-    //     op -> associateInput(1,T1);
+        // input_1 - right
+        std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+        T1->resize({});
+        op -> associateInput(1,T1);
 
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims()).empty());
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims()).empty());
 
-    //     // input_1 - wrong
-    //     T1->resize({dist(gen)});
+        // input_1 - wrong
+        T1->resize({dist(gen)});
 
-    //     REQUIRE_THROWS(op->forwardDims());
-    // }
+        REQUIRE_THROWS(op->forwardDims());
+    }
 
     SECTION("1-D / N-D") {
         // input_0
@@ -193,4 +193,4 @@ TEST_CASE("[core/operator] MatMul_Op(forwardDims)", "[MatMul][forwardDims]") {
         REQUIRE_THROWS(op -> forwardDims());
     }
 }
-} // namespace Aidge
\ No newline at end of file
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Mul_Op.cpp b/unit_tests/operator/Test_Mul_Op.cpp
index f3f8fb9522943d0a9574cb80cfc228135a973890..8efd1c2dcff0686dd3f1e589ceae6b0655c7937e 100644
--- a/unit_tests/operator/Test_Mul_Op.cpp
+++ b/unit_tests/operator/Test_Mul_Op.cpp
@@ -44,54 +44,54 @@ TEST_CASE("[core/operator] Mul_Op(forwardDims)", "[Mul][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_Pow_Op.cpp b/unit_tests/operator/Test_Pow_Op.cpp
index 4a8d242a355cda58c7b36914efdb1304220f713a..90b865c1d9bd19e3fce51c2af477a9cde16e33bd 100644
--- a/unit_tests/operator/Test_Pow_Op.cpp
+++ b/unit_tests/operator/Test_Pow_Op.cpp
@@ -44,54 +44,54 @@ TEST_CASE("[core/operator] Pow_Op(forwardDims)", "[Pow][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_Sub_Op.cpp b/unit_tests/operator/Test_Sub_Op.cpp
index 329f3da798854ddff3d1c1393d60c57ef180c70a..0797def124a6bbb97c4f15ae98a310a46d313181 100644
--- a/unit_tests/operator/Test_Sub_Op.cpp
+++ b/unit_tests/operator/Test_Sub_Op.cpp
@@ -44,54 +44,54 @@ TEST_CASE("[core/operator] Sub_Op(forwardDims)", "[Sub][forwardDims]") {
      * @todo Special case: scalar not handled yet by
      * ``OperatorTensor::forwardDims()``
      */
-    // SECTION("Scalar / Scalar") {
-    //     // input_0
-    //     T0->resize({});
-
-    //     // input_1
-    //     T1->resize({});
-
-    //     REQUIRE_NOTHROW(op->forwardDims());
-    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
-    // }
-    // SECTION("Scalar / +1-D") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_0
-    //     T0->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_1
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T1->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
-    // SECTION("+1-D / Scalar") {
-    //     // a scalar is compatible with any other Tensor
-    //     // input_1
-    //     T1->resize({});
-
-    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
-
-    //         // input_0
-    //         const std::size_t nb_dims = nbDimsDist(gen);
-    //         std::vector<std::size_t> dims(nb_dims);
-    //         for (std::size_t i = 0; i < nb_dims; ++i) {
-    //             dims[i] = dimsDist(gen);
-    //         }
-    //         T0->resize(dims);
-
-    //         REQUIRE_NOTHROW(op->forwardDims());
-    //         REQUIRE((op->getOutput(0)->dims()) == dims);
-    //     }
-    // }
+    SECTION("Scalar / Scalar") {
+        // input_0
+        T0->resize({});
+
+        // input_1
+        T1->resize({});
+
+        REQUIRE_NOTHROW(op->forwardDims());
+        REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    }
+    SECTION("Scalar / +1-D") {
+        // a scalar is compatible with any other Tensor
+        // input_0
+        T0->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_1
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T1->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
+    SECTION("+1-D / Scalar") {
+        // a scalar is compatible with any other Tensor
+        // input_1
+        T1->resize({});
+
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+            // input_0
+            const std::size_t nb_dims = nbDimsDist(gen);
+            std::vector<std::size_t> dims(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims[i] = dimsDist(gen);
+            }
+            T0->resize(dims);
+
+            REQUIRE_NOTHROW(op->forwardDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims);
+        }
+    }
     SECTION("+1-D / +1-D") {
         // same size
         for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
diff --git a/unit_tests/operator/Test_TransposeImpl.cpp b/unit_tests/operator/Test_TransposeImpl.cpp
index 8b6eafc70b7eefec6e1ccab9d0cfcde1eb4a09d5..18f0d68d87ac1ee66ffb1f24c4c130f9b020d56e 100644
--- a/unit_tests/operator/Test_TransposeImpl.cpp
+++ b/unit_tests/operator/Test_TransposeImpl.cpp
@@ -18,6 +18,16 @@
 using namespace Aidge;
 
 TEST_CASE("[cpu/operator] Transpose(forward)") {
+    SECTION("Scalar Tensor") {
+        std::shared_ptr<Tensor> input = std::make_shared<Tensor>(2);
+        std::shared_ptr<Tensor> output = std::make_shared<Tensor>(2);
+        std::shared_ptr<Node> myTranspose = Transpose({});
+        auto op = std::static_pointer_cast<OperatorTensor>(myTranspose -> getOperator());
+        op->associateInput(0,input);
+        op->setDataType(DataType::Float32);
+        op->setBackend("cpu");
+        REQUIRE_THROWS(myTranspose->forward());
+    }
     SECTION("3D Tensor") {
         std::shared_ptr<Tensor> input = std::make_shared<Tensor>(Array3D<float,2,3,4> {
             {
@@ -120,4 +130,4 @@ TEST_CASE("[cpu/operator] Transpose(forward)") {
 
         REQUIRE(*(op->getOutput(0)) == *output);
     }
-}
\ No newline at end of file
+}