diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 8539c8e36da86e7d15943f2a72826313858751ea..12551e71cd646564321eeb23f64bf68d77a8886c 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -12,8 +12,12 @@
 #ifndef AIDGE_TENSORIMPL_H_
 #define AIDGE_TENSORIMPL_H_
 
-#include <cstddef>
-#include <cstdio>
+#include <cstddef>     // std::size_t
+#include <functional>  // std::multiplies
+#include <numeric>     // std::accumulate
+#include <utility>     // std::pair, std::make_pair
+#include <vector>
+
 #include "aidge/data/Data.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
@@ -59,23 +63,42 @@ private:
 */
 
 /**
- * This class manages the raw data storage of a Tensor and provide generic copy
+ * @class TensorImpl
+ * @brief Class to manage the raw data storage of a Tensor and provide generic copy
  * primitives from other devices and from/to host.
- * It can own the data or not (use setRawPtr() to set an external data owner).
- * It only knows the data type and data capacity, but does not handle anything else.
+ * @note It can own the data or not (use ``setRawPtr()`` to set an external data owner).
+ * @note It only knows the data type and data capacity, but does not handle anything else.
 */
 class TensorImpl {
+protected:
+
+    const char *mBackend;
+    /// @brief Device id.
+    const DeviceIdx_t mDevice;
+    /// Number of elements (to be) stored.
+    NbElts_t mNbElts;
+
 public:
     TensorImpl() = delete;
-    TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims) : mBackend(backend), mDevice(device) 
+
+    TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims)
+        : mBackend(backend),
+          mDevice(device)
     {
         resize(dims);
     };
 
+    virtual ~TensorImpl() = default;
+
+    virtual bool operator==(const TensorImpl &othImpl) const = 0;
+
+public:
     /**
      * Return the (backend, device) pair for this implementation.
     */
-    std::pair<std::string, DeviceIdx_t> device() const { return std::make_pair(mBackend, mDevice); }
+    std::pair<std::string, DeviceIdx_t> device() const {
+        return std::make_pair(std::string(mBackend), mDevice);
+    }
 
     /**
      * Copy data from the same device.
@@ -151,11 +174,7 @@ public:
      * Set the size, in number of elements, that must be stored.
     */
     virtual void resize(std::vector<DimSize_t> dims) {
-        size_t product = 1;
-        for (size_t num : dims) {
-            product *= num;
-        }
-        mNbElts = product;
+        mNbElts = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
     }
 
     /**
@@ -168,23 +187,15 @@ public:
     */
     virtual std::size_t scalarSize() const noexcept = 0;
     constexpr const char *backend() const { return mBackend; }
-    virtual ~TensorImpl() = default;
-    virtual bool operator==(const TensorImpl &othImpl) const = 0;
 
     /**
-     * Copy from another backend.
+     * @brief Copy from another backend.
      * @param srcImpl Source TensorImpl to copy from.
      * @param length Number of elements of size scalarSize() to copy
      * @param srcOffset Source offset (in number of elements).
      * @param dstOffset Destination offset (in number of elements).
     */
     void copyFrom(const TensorImpl& srcImpl, NbElts_t length, NbElts_t srcOffset = 0, NbElts_t dstOffset = 0);
-
-protected:
-    const char *mBackend;
-    const DeviceIdx_t mDevice;
-    /// Number of elements (to be) stored
-    NbElts_t mNbElts;
 };
 
 } // namespace Aidge
diff --git a/include/aidge/backend/cpu/data/GetCPUPtr.h b/include/aidge/backend/cpu/data/GetCPUPtr.h
index 47e3b07e8fa08cdcd714745a9a49bb03e30f79f5..b3e0fd967457585e7e3719f92dde7d6d93eee903 100644
--- a/include/aidge/backend/cpu/data/GetCPUPtr.h
+++ b/include/aidge/backend/cpu/data/GetCPUPtr.h
@@ -12,13 +12,16 @@
 #ifndef AIDGE_CPU_DATA_GETCPUPTR_H_
 #define AIDGE_CPU_DATA_GETCPUPTR_H_
 
+#include <cstddef>
+#include <memory>
+
 #include "aidge/data/Tensor.hpp"
 
 namespace Aidge {
-inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) {
+inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data, const std::size_t offset = 0) {
   const auto tensor = std::static_pointer_cast<Tensor>(data);
-  return tensor->getImpl()->hostPtr(tensor->getImplOffset());
+  return tensor->getImpl()->hostPtr(tensor->getImplOffset() + offset);
 }
 } // namespace Aidge
 
-#endif // AIDGE_CPU_DATA_GETCPUPTR_H_
\ No newline at end of file
+#endif // AIDGE_CPU_DATA_GETCPUPTR_H_
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 641c44c03b4e33f210e53f7822dd2d26c5a7d32f..3ccd55d3f19b3cff70c1a100d980ae63213261c5 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -17,6 +17,7 @@
 #include <memory>
 #include <numeric>   // std::accumulate
 #include <string>
+#include <type_traits>  // std::is_arithmetic
 #include <vector>
 
 #include "aidge/backend/TensorImpl.hpp"
@@ -99,6 +100,17 @@ class Tensor : public Data,
         return newTensor;
     }
 
+    template<typename T,
+            typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    Tensor(T val)
+        : Data(Type),
+          mDataType(NativeType<VT>::type),
+          mDims({}), mStrides({1}),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
+          mSize(1) {
+        *static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
+    }
+
     /**
      * @brief Construct a new Tensor object from the 1-dimension Array helper.
      * @tparam T datatype
@@ -291,7 +303,7 @@ class Tensor : public Data,
      * @brief Get the data type enum.
      * @return constexpr DataType
      */
-    constexpr DataType dataType() const { return mDataType; }
+    constexpr DataType dataType() const noexcept { return mDataType; }
 
     /**
      * @brief Set the DataType of the Tensor and converts data
@@ -334,7 +346,7 @@ class Tensor : public Data,
      * @return true
      * @return false
      */
-    bool hasImpl() const { return (mImpl) ? true : false; }
+    bool hasImpl() const noexcept { return mImpl ? true : false; }
 
     /**
      * @brief Get number of dimensions of the Tensor.
@@ -369,13 +381,13 @@ class Tensor : public Data,
      * @brief Return true if Tensor is contiguous in memory.
      * @return bool
      */
-    constexpr bool isContiguous() const { return mContiguous; }
+    constexpr bool isContiguous() const noexcept { return mContiguous; }
 
     /**
      * @brief Get the number of elements in the Tensor object.
      * @return constexpr std::size_t
      */
-    constexpr std::size_t size() const { return mSize; }
+    constexpr std::size_t size() const noexcept { return mSize; }
 
     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 9aed8299a67ab719141b6fe199ebf3f52fb7d387..97a4ef69bd371e80c4e63303feac5e64197670b3 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -68,13 +68,7 @@ public:
     // }
 
 
-    // void checkDims() const override final {
-    //     assert(outputDimsForwarded());
-    //     for (const auto& in : mInputs) {
-    //         assert(in->dims() == mOutputs[0]->dims());
-    //     }
-    // }
-
+    void computeOutputDims() override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Add_Op>::create(name)(*this);
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 94b755e0fdb0f76d54cd4f046fb8b08dda05b6b2..a033c6920a374003ad869bddbf5641c48fc5f6e2 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -60,7 +60,7 @@ public:
     }
 
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 78b2fa5f98c9dae66ae291769f2de08d7805a738..8758021a9c3de1707a96bbfafc21686ded8b7e40 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -62,7 +62,7 @@ public:
     }
 
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index d498cacc7c5b2ddc3269f3ebc77707aead8eb52d..ba8d3d05877f9aa543518fff1d88f4e8a436b712 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -60,7 +60,7 @@ public:
     }
 
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index ee5efa24dc24ebcd5ad4c45491c968caf691eee9..7d346457ead71724ba05da70b5bdf7ad145cbe0c 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -65,7 +65,7 @@ public:
     }
 
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 4e638fd86da487565a89760925e45339213fa8f9..a54302d06059d43336800d81e4d18744b6243785 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -9,8 +9,53 @@
  *
  ********************************************************************************/
 
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
 #include <string>
+#include <vector>
 
 #include "aidge/operator/Add.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
-const std::string Aidge::Add_Op::Type = "Add";
\ No newline at end of file
+const std::string Aidge::Add_Op::Type = "Add";
+
+void Aidge::Add_Op::computeOutputDims() {
+    // check inputs have been associated
+    bool associated = (nbInputs() > 0); // do not compute anything if no input
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
+        associated &= !(getInput(i)->empty());
+    }
+    if (associated) {
+        std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
+        for (std::size_t i = 0; i < nbInputs(); i++) {
+            inputsDims[i] = getInput(i)->dims();
+        }
+
+        std::size_t outNbDims = 1;
+        for(std::size_t i = 0; i < nbInputs(); ++i) {
+            outNbDims = (inputsDims[i].size() > outNbDims) ? inputsDims[i].size() : outNbDims;
+        }
+
+        std::vector<std::size_t> outDims(outNbDims, 1);
+
+        for (auto it = outDims.rbegin(); it != outDims.rend(); ++it) {
+            for (std::size_t i = 0; i < nbInputs(); ++i) {
+                if(!inputsDims[i].empty()) {
+                    const std::size_t dim = inputsDims[i].back();
+                    inputsDims[i].pop_back();
+                    if (*it == 1) {
+                        *it = dim;
+                    }
+                    else if ((dim != *it) && (dim != 1)) {
+                        AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Add operation");
+                    }
+                }
+            }
+        }
+        mOutputs[0]->resize(outDims);
+    }
+}
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index 85db3ac6ef66c837c86dbece288185deaca88ba6..6b55338f4ab7ac9131231fcced21869274c1bd47 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -9,11 +9,10 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <cstddef>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
-#include <utility>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Div.hpp"
@@ -28,11 +27,27 @@ void Aidge::Div_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }
 
-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // div by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // div elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // div by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Div Operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
 }
\ No newline at end of file
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index bc268263e8a6e2ec7c9944faa31da84dc50c4f53..e6d8b017c0c3e5effb43dd789b569f283154e80d 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -9,10 +9,10 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <cstddef>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
 #include <vector>
-#include <utility>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Mul.hpp"
@@ -27,11 +27,27 @@ void Aidge::Mul_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }
 
-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // mul by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // mul elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // mul by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Mul Operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
 }
\ No newline at end of file
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index de1f0c3694f51fbd5b365573f61d3e3e2b9109ff..5e29eae0c0f42e7d566a933e9409766026369dad 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -9,10 +9,10 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <cstddef>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
 #include <vector>
-#include <utility>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Pow.hpp"
@@ -27,11 +27,27 @@ void Aidge::Pow_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }
 
-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // pow by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // pow elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // pow by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Pow Operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
 }
\ No newline at end of file
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index 639eaf798c1c2a9a6685e8b8d2c4a2cb00a4b57a..9d933bf6c97348842fae8f405d3e709e68d56916 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -9,10 +9,10 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <cstddef>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
 #include <vector>
-#include <utility>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Sub.hpp"
@@ -27,11 +27,27 @@ void Aidge::Sub_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }
 
-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // sub by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // sub elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // sub by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Sub Operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_Div_Op.cpp b/unit_tests/operator/Test_Div_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e659742c0bd200fa33b598f581cfef7b2f1e432e
--- /dev/null
+++ b/unit_tests/operator/Test_Div_Op.cpp
@@ -0,0 +1,144 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Div.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Div Operator
+    std::shared_ptr<Node> myDiv = Div();
+    auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op -> associateInput(0,T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+
+    /**
+     * @todo Special case: scalar not handled yet by
+     * ``OperatorTensor::computeOutputDims()``
+     */
+    // SECTION("Scalar / Scalar") {
+    //     // input_0
+    //     T0->resize({});
+
+    //     // input_1
+    //     T1->resize({});
+
+    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    // }
+    // SECTION("Scalar / +1-D") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_0
+    //     T0->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_1
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T1->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    // SECTION("+1-D / Scalar") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_1
+    //     T1->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_0
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T0->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    SECTION("+1-D / +1-D") {
+        // same size
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+
+        // broadcast
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+
+            // input_1 - wrong
+            // T1->resize({dims[0] + 1});
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);
+            REQUIRE_THROWS(op->computeOutputDims());
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Mul_Op.cpp b/unit_tests/operator/Test_Mul_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d3e0c5e086fac9d31db817d628214e95d4e41a32
--- /dev/null
+++ b/unit_tests/operator/Test_Mul_Op.cpp
@@ -0,0 +1,144 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Mul Operator
+    std::shared_ptr<Node> myMul = Mul();
+    auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op -> associateInput(0,T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+
+    /**
+     * @todo Special case: scalar not handled yet by
+     * ``OperatorTensor::computeOutputDims()``
+     */
+    // SECTION("Scalar / Scalar") {
+    //     // input_0
+    //     T0->resize({});
+
+    //     // input_1
+    //     T1->resize({});
+
+    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    // }
+    // SECTION("Scalar / +1-D") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_0
+    //     T0->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_1
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T1->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    // SECTION("+1-D / Scalar") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_1
+    //     T1->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_0
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T0->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    SECTION("+1-D / +1-D") {
+        // same size
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+
+        // broadcast
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+
+            // input_1 - wrong
+            // T1->resize({dims[0] + 1});
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);
+            REQUIRE_THROWS(op->computeOutputDims());
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Pow_Op.cpp b/unit_tests/operator/Test_Pow_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c77615c11e99c174707df21560044fdd3b6a3c42
--- /dev/null
+++ b/unit_tests/operator/Test_Pow_Op.cpp
@@ -0,0 +1,144 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Pow.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Pow Operator
+    std::shared_ptr<Node> myPow = Pow();
+    auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op -> associateInput(0,T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+
+    /**
+     * @todo Special case: scalar not handled yet by
+     * ``OperatorTensor::computeOutputDims()``
+     */
+    // SECTION("Scalar / Scalar") {
+    //     // input_0
+    //     T0->resize({});
+
+    //     // input_1
+    //     T1->resize({});
+
+    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    // }
+    // SECTION("Scalar / +1-D") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_0
+    //     T0->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_1
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T1->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    // SECTION("+1-D / Scalar") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_1
+    //     T1->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_0
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T0->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    SECTION("+1-D / +1-D") {
+        // same size
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+
+        // broadcast
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+
+            // input_1 - wrong
+            // e.g. T1->resize({dims1[0] + 1});
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);
+            REQUIRE_THROWS(op->computeOutputDims());
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Sub_Op.cpp b/unit_tests/operator/Test_Sub_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b7b744410d31ea32dea5a15cc7a29da093488d14
--- /dev/null
+++ b/unit_tests/operator/Test_Sub_Op.cpp
@@ -0,0 +1,144 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Sub.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Sub Operator
+    std::shared_ptr<Node> mySub = Sub();
+    auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op -> associateInput(0,T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+
+    /**
+     * @todo Special case: scalar not handled yet by
+     * ``OperatorTensor::computeOutputDims()``
+     */
+    // SECTION("Scalar / Scalar") {
+    //     // input_0
+    //     T0->resize({});
+
+    //     // input_1
+    //     T1->resize({});
+
+    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    // }
+    // SECTION("Scalar / +1-D") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_0
+    //     T0->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_1
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T1->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    // SECTION("+1-D / Scalar") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_1
+    //     T1->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_0
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T0->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    SECTION("+1-D / +1-D") {
+        // same size
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+
+        // broadcast
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+
+            // input_1 - wrong
+            // e.g. T1->resize({dims1[0] + 1});
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);
+            REQUIRE_THROWS(op->computeOutputDims());
+        }
+    }
+}
+} // namespace Aidge