diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index 9e0e457b49fe40b2a6e9e3ce5c5e4b77bee1d93e..6c4ca93ce28c0a8c769606f07b1badee676423fd 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -70,6 +70,7 @@
 #include "aidge/utils/Attributes.hpp"
 #include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/DynamicAttributes.hpp"
+#include "aidge/utils/Random.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
 
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 545e6c672705fe16186d41f9c46cde94b3f3ab7e..509c11691047604fbce959cfb29649aac75b5a1e 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -12,8 +12,12 @@
 #ifndef AIDGE_TENSORIMPL_H_
 #define AIDGE_TENSORIMPL_H_
 
-#include <cstddef>
-#include <cstdio>
+#include <numeric>     // std::accumulate
+#include <cstddef>     // std::size_t
+#include <functional>  // std::multiplies
+#include <vector>
+#include <utility>     // std::pair, std::make_pair
+
 #include "aidge/data/Data.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
@@ -59,23 +63,42 @@ private:
 */
 
 /**
- * This class manages the raw data storage of a Tensor and provide generic copy
+ * @class TensorImpl
+ * @brief Class to manage the raw data storage of a Tensor and provide generic copy
  * primitives from other devices and from/to host.
- * It can own the data or not (use setRawPtr() to set an external data owner).
- * It only knows the data type and data capacity, but does not handle anything else.
+ * @note It can own the data or not (use ``setRawPtr()`` to set an external data owner).
+ * @note It only knows the data type and data capacity, but does not handle anything else.
 */
 class TensorImpl {
+protected:
+
+    const char *mBackend;
+    /// @brief Device id.
+    const DeviceIdx_t mDevice;
+    /// Number of elements (to be) stored.
+    NbElts_t mNbElts;
+
 public:
     TensorImpl() = delete;
-    TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims) : mBackend(backend), mDevice(device) 
+
+    TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims)
+        : mBackend(backend),
+          mDevice(device)
     {
         resize(dims);
     };
 
+    virtual ~TensorImpl() = default;
+
+    virtual bool operator==(const TensorImpl &othImpl) const = 0;
+
+public:
     /**
      * Return the (backend, device) pair for this implementation.
     */
-    std::pair<std::string, DeviceIdx_t> device() const { return std::make_pair(mBackend, mDevice); }
+    std::pair<std::string, DeviceIdx_t> device() const noexcept {
+        return std::make_pair(std::string(mBackend), mDevice);
+    }
 
     /**
      * Copy data from the same device.
@@ -151,11 +174,7 @@ public:
      * Set the size, in number of elements, that must be stored.
     */
     virtual void resize(std::vector<DimSize_t> dims) {
-        size_t product = 1;
-        for (size_t num : dims) {
-            product *= num;
-        }
-        mNbElts = product;
+        mNbElts = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
     }
 
     /**
@@ -168,23 +187,15 @@ public:
     */
     virtual std::size_t scalarSize() const noexcept = 0;
     constexpr const char *backend() const { return mBackend; }
-    virtual ~TensorImpl() = default;
-    virtual bool operator==(const TensorImpl &othImpl) const = 0;
 
     /**
-     * Copy from another backend.
+     * @brief Copy from another backend.
      * @param srcImpl Source TensorImpl to copy from.
      * @param length Number of elements of size scalarSize() to copy
      * @param srcOffset Source offset (in number of elements).
      * @param dstOffset Destination offset (in number of elements).
     */
     void copyFrom(const TensorImpl& srcImpl, NbElts_t length, NbElts_t srcOffset = 0, NbElts_t dstOffset = 0);
-
-protected:
-    const char *mBackend;
-    const DeviceIdx_t mDevice;
-    /// Number of elements (to be) stored
-    NbElts_t mNbElts;
 };
 
 } // namespace Aidge
diff --git a/include/aidge/data/DataProvider.hpp b/include/aidge/data/DataProvider.hpp
index 5c7a1c73ce4ad4eb512a446879cb1ad9b673eb2f..62d10a6983e8cf5fd8e2730d3203bed97284e336 100644
--- a/include/aidge/data/DataProvider.hpp
+++ b/include/aidge/data/DataProvider.hpp
@@ -20,8 +20,6 @@
 #include "aidge/data/Database.hpp"
 #include "aidge/data/Data.hpp"
 
-
-
 namespace Aidge {
 
 /**
@@ -33,14 +31,35 @@ class DataProvider {
 private:
     // Dataset providing the data to the dataProvider
     const Database& mDatabase;
+    
+    // Desired size of the produced batches
+    const std::size_t mBatchSize;
+
+    // Enable random shuffling for learning
+    const bool mShuffle;
 
+    // Drops the last non-full batch
+    const bool mDropLast;
+
+    // Number of modalities in one item
     const std::size_t mNumberModality;
-    std::vector<std::vector<std::size_t>> mDataSizes;
-    std::vector<std::string> mDataBackends;
-    std::vector<DataType> mDataTypes;
 
-    // Desired size of the produced batches
-    const std::size_t mBatchSize;
+    // mNbItems contains the number of items in the database
+    std::size_t mNbItems;
+    // mBatches contains the call order of each database item
+    std::vector<unsigned int> mBatches; 
+    // mIndex browsing the number of batch
+    std::size_t mIndexBatch;
+
+    // mNbBatch contains the number of batch
+    std::size_t mNbBatch;
+    // Size of the Last batch
+    std::size_t mLastBatchSize;
+
+    // Store each modality dimensions, backend and type
+    std::vector<std::vector<std::size_t>> mDataDims;
+    std::vector<std::string> mDataBackends;
+    std::vector<DataType> mDataTypes; 
 
 public:
     /**
@@ -48,15 +67,76 @@ public:
      * @param database database from which to load the data.
      * @param batchSize number of data samples per batch.
      */
-    DataProvider(const Database& database, const std::size_t batchSize);
+    DataProvider(const Database& database, const std::size_t batchSize, const bool shuffle = false, const bool dropLast = false);
 
 public:
     /**
-     * @brief Create a batch for each data modality in the database. The returned batch contain the data as sorted in the database.
-     * @param startIndex the starting index in the database to start the batch from.
+     * @brief Create a batch for each data modality in the database.
      * @return a vector of tensors. Each tensor is a batch corresponding to one modality.
      */
-    std::vector<std::shared_ptr<Tensor>> readBatch(const std::size_t startIndex) const;
+    std::vector<std::shared_ptr<Tensor>> readBatch() const;
+
+    /**
+     * @brief Get the Number of Batch.
+     * 
+     * @return std::size_t 
+     */
+    inline std::size_t getNbBatch(){
+        return mNbBatch;
+    };
+
+    /**
+     * @brief Get the current Index Batch.
+     * 
+     * @return std::size_t 
+     */
+    inline std::size_t getIndexBatch(){
+        return mIndexBatch;
+    };
+
+    /**
+     * @brief Reset the internal index batch that browses the data of the database to zero.
+     */
+    inline void resetIndexBatch(){
+        mIndexBatch = 0;
+    };
+
+    /**
+     * @brief Increment the internal index batch that browses the data of the database.
+     */
+    inline void incrementIndexBatch(){
+        ++mIndexBatch;
+    };
+
+    /**
+     * @brief Setup the batches for one pass on the database.
+     */
+    void setBatches();
+
+    /**
+     * @brief End condition of dataProvider for one pass on the database.
+     * 
+     * @return true when all batch were fetched, False otherwise
+     */
+    inline bool done(){
+        return (mIndexBatch == mNbBatch);
+    };
+
+
+    // Functions for python iterator iter and next (definition in pybind.cpp)
+    /**
+     * @brief __iter__ method for iterator protocol
+     * 
+     * @return DataProvider* 
+     */
+    DataProvider* iter();
+
+    /**
+     * @brief __next__ method for iterator protocol
+     * 
+     * @return std::vector<std::shared_ptr<Aidge::Tensor>> 
+     */
+    std::vector<std::shared_ptr<Aidge::Tensor>> next();
 };
 
 } // namespace Aidge
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 641c44c03b4e33f210e53f7822dd2d26c5a7d32f..3ccd55d3f19b3cff70c1a100d980ae63213261c5 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -17,6 +17,7 @@
 #include <memory>
 #include <numeric>   // std::accumulate
 #include <string>
+#include <type_traits>  // std::is_arithmetic
 #include <vector>
 
 #include "aidge/backend/TensorImpl.hpp"
@@ -99,6 +100,17 @@ class Tensor : public Data,
         return newTensor;
     }
 
+    template<typename T,
+            typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    Tensor(T val)
+        : Data(Type),
+          mDataType(NativeType<VT>::type),
+          mDims({}), mStrides({1}),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
+          mSize(1) {
+        *static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
+    }
+
     /**
      * @brief Construct a new Tensor object from the 1-dimension Array helper.
      * @tparam T datatype
@@ -291,7 +303,7 @@ class Tensor : public Data,
      * @brief Get the data type enum.
      * @return constexpr DataType
      */
-    constexpr DataType dataType() const { return mDataType; }
+    constexpr DataType dataType() const noexcept { return mDataType; }
 
     /**
      * @brief Set the DataType of the Tensor and converts data
@@ -334,7 +346,7 @@ class Tensor : public Data,
      * @return true
      * @return false
      */
-    bool hasImpl() const { return (mImpl) ? true : false; }
+    bool hasImpl() const noexcept { return mImpl ? true : false; }
 
     /**
      * @brief Get number of dimensions of the Tensor.
@@ -369,13 +381,13 @@ class Tensor : public Data,
      * @brief Return true if Tensor is contiguous in memory.
      * @return bool
      */
-    constexpr bool isContiguous() const { return mContiguous; }
+    constexpr bool isContiguous() const noexcept { return mContiguous; }
 
     /**
      * @brief Get the number of elements in the Tensor object.
      * @return constexpr std::size_t
      */
-    constexpr std::size_t size() const { return mSize; }
+    constexpr std::size_t size() const noexcept { return mSize; }
 
     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 9aed8299a67ab719141b6fe199ebf3f52fb7d387..97a4ef69bd371e80c4e63303feac5e64197670b3 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -68,13 +68,7 @@ public:
     // }
 
 
-    // void checkDims() const override final {
-    //     assert(outputDimsForwarded());
-    //     for (const auto& in : mInputs) {
-    //         assert(in->dims() == mOutputs[0]->dims());
-    //     }
-    // }
-
+    void computeOutputDims() override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Add_Op>::create(name)(*this);
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 94b755e0fdb0f76d54cd4f046fb8b08dda05b6b2..a033c6920a374003ad869bddbf5641c48fc5f6e2 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -60,7 +60,7 @@ public:
     }
 
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 78b2fa5f98c9dae66ae291769f2de08d7805a738..8758021a9c3de1707a96bbfafc21686ded8b7e40 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -62,7 +62,7 @@ public:
     }
 
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index d498cacc7c5b2ddc3269f3ebc77707aead8eb52d..ba8d3d05877f9aa543518fff1d88f4e8a436b712 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -60,7 +60,7 @@ public:
     }
 
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index ee5efa24dc24ebcd5ad4c45491c968caf691eee9..7d346457ead71724ba05da70b5bdf7ad145cbe0c 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -65,7 +65,7 @@ public:
     }
 
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
diff --git a/include/aidge/utils/Random.hpp b/include/aidge/utils/Random.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..704609c0c778c7065a580b86fc67aea7e9d3525d
--- /dev/null
+++ b/include/aidge/utils/Random.hpp
@@ -0,0 +1,31 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+
+#ifndef AIDGE_RANDOM_H_
+#define AIDGE_RANDOM_H_
+
+
+#include <algorithm>
+#include <vector>
+#include <random>
+
+namespace Random {
+
+    inline void randShuffle(std::vector<unsigned int>& vec) {
+        std::random_device rd;
+        std::mt19937 g(rd());
+        std::shuffle(vec.begin(), vec.end(), g);
+    }
+
+}
+
+#endif //AIDGE_RANDOM_H_
\ No newline at end of file
diff --git a/python_binding/data/pybind_DataProvider.cpp b/python_binding/data/pybind_DataProvider.cpp
index dfdf188946673c4e2a7ea2dc0829312758d80f96..2f652aff5008f8008952ffb1bb6fb1738021b436 100644
--- a/python_binding/data/pybind_DataProvider.cpp
+++ b/python_binding/data/pybind_DataProvider.cpp
@@ -4,19 +4,33 @@
 #include "aidge/data/Database.hpp"
 
 namespace py = pybind11;
+
 namespace Aidge {
 
+// __iter__ method for iterator protocol
+DataProvider* DataProvider::iter(){
+    resetIndexBatch();
+    setBatches();
+    return this;
+}
+
+// __next__ method for iterator protocol
+std::vector<std::shared_ptr<Aidge::Tensor>> DataProvider::next() {
+    if (!done()){
+        incrementIndexBatch();
+        return readBatch();
+    } else {
+        throw py::stop_iteration();
+    }
+}
+
 void init_DataProvider(py::module& m){
 
     py::class_<DataProvider, std::shared_ptr<DataProvider>>(m, "DataProvider")
-          .def(py::init<Database&, std::size_t>(), py::arg("database"), py::arg("batchSize"))
-          .def("read_batch", &DataProvider::readBatch, py::arg("start_index"),
-          R"mydelimiter(
-          Return a batch of each data modality.
-
-          :param start_index: Database starting index to read the batch from
-          :type start_index: int
-          )mydelimiter");
+        .def(py::init<Database&, std::size_t, bool, bool>(), py::arg("database"), py::arg("batch_size"), py::arg("shuffle") = false, py::arg("drop_last") = false)
+        .def("__iter__", &DataProvider::iter)
+        .def("__next__", &DataProvider::next)
+        .def("__len__", &DataProvider::getNbBatch);
     
 }
 }
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index e4b8da920114f2fff799b6414a2f8ba3b0515f6f..85eda8f663146d106e11a21eb0751d456b06aef3 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -96,10 +96,18 @@ void init_Tensor(py::module& m){
                 return py::cast(b.get<double>(idx));
             case DataType::Float32:
                 return py::cast(b.get<float>(idx));
+            case DataType::Int8:
+                return py::cast(b.get<std::int8_t>(idx));
+            case DataType::Int16:
+                return py::cast(b.get<std::int16_t>(idx));
             case DataType::Int32:
                 return py::cast(b.get<std::int32_t>(idx));
             case DataType::Int64:
                 return py::cast(b.get<std::int64_t>(idx));
+            case DataType::UInt8:
+                return py::cast(b.get<std::uint8_t>(idx));
+            case DataType::UInt16:
+                return py::cast(b.get<std::uint16_t>(idx));
             default:
                 return py::none();
         }
@@ -111,10 +119,18 @@ void init_Tensor(py::module& m){
                 return py::cast(b.get<double>(coordIdx));
             case DataType::Float32:
                 return py::cast(b.get<float>(coordIdx));
+            case DataType::Int8:
+                return py::cast(b.get<std::int8_t>(coordIdx));
+            case DataType::Int16:
+                return py::cast(b.get<std::int16_t>(coordIdx));
             case DataType::Int32:
                 return py::cast(b.get<std::int32_t>(coordIdx));
             case DataType::Int64:
                 return py::cast(b.get<std::int64_t>(coordIdx));
+            case DataType::UInt8:
+                return py::cast(b.get<std::uint8_t>(coordIdx));
+            case DataType::UInt16:
+                return py::cast(b.get<std::uint16_t>(coordIdx));
             default:
                 return py::none();
         }
@@ -141,6 +157,12 @@ void init_Tensor(py::module& m){
                 break;
             case DataType::Float32:
                 dataFormatDescriptor = py::format_descriptor<float>::format();
+                break;
+            case DataType::Int8:
+                dataFormatDescriptor = py::format_descriptor<std::int8_t>::format();
+                break;
+            case DataType::Int16:
+                dataFormatDescriptor = py::format_descriptor<std::int16_t>::format();
                 break;
             case DataType::Int32:
                 dataFormatDescriptor = py::format_descriptor<std::int32_t>::format();
@@ -148,6 +170,12 @@ void init_Tensor(py::module& m){
             case DataType::Int64:
                 dataFormatDescriptor = py::format_descriptor<std::int64_t>::format();
                 break;
+            case DataType::UInt8:
+                dataFormatDescriptor = py::format_descriptor<std::uint8_t>::format();
+                break;
+            case DataType::UInt16:
+                dataFormatDescriptor = py::format_descriptor<std::uint16_t>::format();
+                break;
             default:
                 throw py::value_error("Unsupported data format");
         }
diff --git a/src/data/DataProvider.cpp b/src/data/DataProvider.cpp
index dffb5745d9e324856548387069bcf1d5ff6a7b48..7783ed86cf4ae1d8672cc6a35a97ca9a996457b6 100644
--- a/src/data/DataProvider.cpp
+++ b/src/data/DataProvider.cpp
@@ -13,45 +13,56 @@
 #include <cstddef>  // std::size_t
 #include <memory>
 #include <vector>
+#include <cmath>
+#include <numeric>  // std::iota
 
 #include "aidge/data/Database.hpp"
 #include "aidge/data/DataProvider.hpp"
 #include "aidge/data/Tensor.hpp"
 
+#include "aidge/utils/Random.hpp"
+
 
-Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::size_t batchSize)
+Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::size_t batchSize, const bool shuffle, const bool dropLast)
     : mDatabase(database),
+      mBatchSize(batchSize),
+      mShuffle(shuffle),
+      mDropLast(dropLast),
       mNumberModality(database.getItem(0).size()),
-      mBatchSize(batchSize)
+      mNbItems(mDatabase.getLen()),
+      mIndexBatch(0)
 {
     // Iterating on each data modality in the database
     // Get the tensor dimensions, datatype and backend of each modality to ensure each data have the same
     for (const auto& modality : mDatabase.getItem(0)) {
-        mDataSizes.push_back(modality->dims());
+        mDataDims.push_back(modality->dims());
         // assert(std::strcmp(item[i]->getImpl()->backend(), "cpu") == 0 && "DataProvider currently only supports cpu backend tensors");
-        // mDataBackends.push_back(item[i]->getImpl()->backend());
         mDataTypes.push_back(modality->dataType());
     }
+
+    // Compute the number of batches depending on mDropLast boolean
+    mNbBatch = (mDropLast) ?
+                (mNbItems / mBatchSize) :
+                ((mNbItems + mBatchSize - 1) / mBatchSize);
 }
 
-std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch(const std::size_t startIndex) const
+std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch() const
 {
-    assert((startIndex) <= mDatabase.getLen() && " DataProvider readBatch : database fetch out of bounds");
-
-
-    // Determine the batch size (may differ for the last batch)
-    const std::size_t current_batch_size = ((startIndex + mBatchSize) > mDatabase.getLen()) ?
-                                            mDatabase.getLen()-startIndex :
-                                            mBatchSize;
+    AIDGE_ASSERT(mIndexBatch <= mNbBatch, "Cannot fetch more data than available in database");
+    std::size_t current_batch_size;
+    if (mIndexBatch == mNbBatch) {
+        current_batch_size = mLastBatchSize;
+    } else {
+        current_batch_size = mBatchSize;
+    }
 
     // Create batch tensors (dimensions, backends, datatype) for each modality
     std::vector<std::shared_ptr<Tensor>> batchTensors;
-    auto dataBatchSize = mDataSizes;
+    auto dataBatchDims = mDataDims;
     for (std::size_t i = 0; i < mNumberModality; ++i) {
-        dataBatchSize[i].insert(dataBatchSize[i].begin(), current_batch_size);
+        dataBatchDims[i].insert(dataBatchDims[i].begin(), current_batch_size);
         auto batchData = std::make_shared<Tensor>();
-        batchData->resize(dataBatchSize[i]);
-        // batchData->setBackend(mDataBackends[i]);
+        batchData->resize(dataBatchDims[i]);
         batchData->setBackend("cpu");
         batchData->setDataType(mDataTypes[i]);
         batchTensors.push_back(batchData);
@@ -60,7 +71,8 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch(const
     // Call each database item and concatenate each data modularity in the batch tensors
     for (std::size_t i = 0; i < current_batch_size; ++i){
 
-        auto dataItem = mDatabase.getItem(startIndex+i);
+        auto dataItem = mDatabase.getItem(mBatches[(mIndexBatch-1)*mBatchSize+i]);
+        // auto dataItem = mDatabase.getItem(startIndex+i);
         // assert same number of modalities
         assert(dataItem.size() == mNumberModality && "DataProvider readBatch : item from database have inconsistent number of modality.");
 
@@ -69,7 +81,7 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch(const
             auto dataSample = dataItem[j];
 
             // Assert tensor sizes
-            assert(dataSample->dims() == mDataSizes[j] && "DataProvider readBatch : corrupted Data size");
+            assert(dataSample->dims() == mDataDims[j] && "DataProvider readBatch : corrupted Data size");
 
             // Assert implementation backend
             // assert(dataSample->getImpl()->backend() == mDataBackends[j] && "DataProvider readBatch : corrupted data backend");
@@ -82,4 +94,31 @@ std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch(const
         }
     }
     return batchTensors;
-}
\ No newline at end of file
+}
+
+
+void Aidge::DataProvider::setBatches(){
+    
+    mBatches.clear();
+    mBatches.resize(mNbItems);
+    std::iota(mBatches.begin(),
+              mBatches.end(),
+              0U);
+
+    if (mShuffle){
+        Random::randShuffle(mBatches);
+    }
+
+    if (mNbItems % mBatchSize !=0){ // The last batch is not full
+        std::size_t lastBatchSize = static_cast<std::size_t>(mNbItems % mBatchSize);
+        if (mDropLast){ // Remove the last non-full batch
+            AIDGE_ASSERT(lastBatchSize <= mBatches.size(), "Last batch bigger than the size of database");
+            mBatches.erase(mBatches.end() - lastBatchSize, mBatches.end());
+            mLastBatchSize = mBatchSize;
+        } else { // Keep the last non-full batch
+            mLastBatchSize = lastBatchSize;
+        }
+    } else { // The last batch is full
+        mLastBatchSize = mBatchSize;
+    }
+}
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 4e638fd86da487565a89760925e45339213fa8f9..a54302d06059d43336800d81e4d18744b6243785 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -9,8 +9,53 @@
  *
  ********************************************************************************/
 
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
 #include <string>
+#include <vector>
 
 #include "aidge/operator/Add.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
-const std::string Aidge::Add_Op::Type = "Add";
\ No newline at end of file
+const std::string Aidge::Add_Op::Type = "Add";
+
+void Aidge::Add_Op::computeOutputDims() {
+    // check inputs have been associated
+    bool associated = (nbInputs() > 0); // do not compute anything if no input
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
+        associated &= !(getInput(i)->empty());
+    }
+    if (associated) {
+        std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
+        for (std::size_t i = 0; i < nbInputs(); i++) {
+            inputsDims[i] = getInput(i)->dims();
+        }
+
+        std::size_t outNbDims = 1;
+        for(std::size_t i = 0; i < nbInputs(); ++i) {
+            outNbDims = (inputsDims[i].size() > outNbDims) ? inputsDims[i].size() : outNbDims;
+        }
+
+        std::vector<std::size_t> outDims(outNbDims, 1);
+
+        for (auto it = outDims.rbegin(); it != outDims.rend(); ++it) {
+            for (std::size_t i = 0; i < nbInputs(); ++i) {
+                if(!inputsDims[i].empty()) {
+                    const std::size_t dim = inputsDims[i].back();
+                    inputsDims[i].pop_back();
+                    if (*it == 1) {
+                        *it = dim;
+                    }
+                    else if ((dim != *it) && (dim != 1)) {
+                        AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsopported Tensor shape for Add operation");
+                    }
+                }
+            }
+        }
+        mOutputs[0]->resize(outDims);
+    }
+}
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index 85db3ac6ef66c837c86dbece288185deaca88ba6..6b55338f4ab7ac9131231fcced21869274c1bd47 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -9,11 +9,10 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <cstddef>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
-#include <utility>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Div.hpp"
@@ -28,11 +27,27 @@ void Aidge::Div_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }
 
-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // div by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // div elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // div by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Div Operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
 }
\ No newline at end of file
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index 54cda11625bac4f8f69e0423daba6a500024491d..d4a594e95b2695b496fc28b8e8a7fcf3442e9253 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -9,10 +9,10 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <cstddef>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
 #include <vector>
-#include <utility>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Mul.hpp"
@@ -27,12 +27,28 @@ void Aidge::Mul_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }
 
-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // mul by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // mul elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // mul by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Mul Operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
     else if (!getInput(0)->empty() && !getInput(1)->empty()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible input dimensions for Operator Mul: {} and {}", getInput(0)->dims(), getInput(1)->dims());
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index de1f0c3694f51fbd5b365573f61d3e3e2b9109ff..5e29eae0c0f42e7d566a933e9409766026369dad 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -9,10 +9,10 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <cstddef>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
 #include <vector>
-#include <utility>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Pow.hpp"
@@ -27,11 +27,27 @@ void Aidge::Pow_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }
 
-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // pow by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // pow elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // pow by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Pow Operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
 }
\ No newline at end of file
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index 639eaf798c1c2a9a6685e8b8d2c4a2cb00a4b57a..9d933bf6c97348842fae8f405d3e709e68d56916 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -9,10 +9,10 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <cstddef>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
 #include <vector>
-#include <utility>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Sub.hpp"
@@ -27,11 +27,27 @@ void Aidge::Sub_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }
 
-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // sub by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // sub elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // sub by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Sub Operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
 }
\ No newline at end of file
diff --git a/unit_tests/operator/Test_Div_Op.cpp b/unit_tests/operator/Test_Div_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e659742c0bd200fa33b598f581cfef7b2f1e432e
--- /dev/null
+++ b/unit_tests/operator/Test_Div_Op.cpp
@@ -0,0 +1,144 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Div.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Div Operator
+    std::shared_ptr<Node> myDiv = Div();
+    auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op -> associateInput(0,T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+
+    /**
+     * @todo Special case: scalar not handled yet by
+     * ``OperatorTensor::computeOutputDims()``
+     */
+    // SECTION("Scalar / Scalar") {
+    //     // input_0
+    //     T0->resize({});
+
+    //     // input_1
+    //     T1->resize({});
+
+    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    // }
+    // SECTION("Scalar / +1-D") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_0
+    //     T0->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_1
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T1->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    // SECTION("+1-D / Scalar") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_1
+    //     T1->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_0
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T0->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    SECTION("+1-D / +1-D") {
+        // same size
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+
+        // broadcast
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+
+            // input_0 - wrong
+            // T1->resize({dims[0] + 1});
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);
+            REQUIRE_THROWS(op->computeOutputDims());
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Mul_Op.cpp b/unit_tests/operator/Test_Mul_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d3e0c5e086fac9d31db817d628214e95d4e41a32
--- /dev/null
+++ b/unit_tests/operator/Test_Mul_Op.cpp
@@ -0,0 +1,144 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Mul Operator
+    std::shared_ptr<Node> myMul = Mul();
+    auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op -> associateInput(0,T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+
+    /**
+     * @todo Special case: scalar not handled yet by
+     * ``OperatorTensor::computeOutputDims()``
+     */
+    // SECTION("Scalar / Scalar") {
+    //     // input_0
+    //     T0->resize({});
+
+    //     // input_1
+    //     T1->resize({});
+
+    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    // }
+    // SECTION("Scalar / +1-D") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_0
+    //     T0->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_1
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T1->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    // SECTION("+1-D / Scalar") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_1
+    //     T1->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_0
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T0->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    SECTION("+1-D / +1-D") {
+        // same size
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+
+        // broadcast
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+
+            // input_0 - wrong
+            // T1->resize({dims[0] + 1});
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);
+            REQUIRE_THROWS(op->computeOutputDims());
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Pow_Op.cpp b/unit_tests/operator/Test_Pow_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c77615c11e99c174707df21560044fdd3b6a3c42
--- /dev/null
+++ b/unit_tests/operator/Test_Pow_Op.cpp
@@ -0,0 +1,144 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Pow.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Pow Operator
+    std::shared_ptr<Node> myPow = Pow();
+    auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op -> associateInput(0,T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+
+    /**
+     * @todo Special case: scalar not handled yet by
+     * ``OperatorTensor::computeOutputDims()``
+     */
+    // SECTION("Scalar / Scalar") {
+    //     // input_0
+    //     T0->resize({});
+
+    //     // input_1
+    //     T1->resize({});
+
+    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    // }
+    // SECTION("Scalar / +1-D") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_0
+    //     T0->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_1
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T1->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    // SECTION("+1-D / Scalar") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_1
+    //     T1->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_0
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T0->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    SECTION("+1-D / +1-D") {
+        // same size
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+
+        // broadcast
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+
+            // input_0 - wrong
+            // T1->resize({dims[0] + 1});
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);
+            REQUIRE_THROWS(op->computeOutputDims());
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Sub_Op.cpp b/unit_tests/operator/Test_Sub_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b7b744410d31ea32dea5a15cc7a29da093488d14
--- /dev/null
+++ b/unit_tests/operator/Test_Sub_Op.cpp
@@ -0,0 +1,144 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Sub.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Sub Operator
+    std::shared_ptr<Node> mySub = Sub();
+    auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op -> associateInput(0,T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+
+    /**
+     * @todo Special case: scalar not handled yet by
+     * ``OperatorTensor::computeOutputDims()``
+     */
+    // SECTION("Scalar / Scalar") {
+    //     // input_0
+    //     T0->resize({});
+
+    //     // input_1
+    //     T1->resize({});
+
+    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    // }
+    // SECTION("Scalar / +1-D") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_0
+    //     T0->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_1
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T1->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    // SECTION("+1-D / Scalar") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_1
+    //     T1->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_0
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T0->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    SECTION("+1-D / +1-D") {
+        // same size
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+
+        // broadcast
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+
+            // input_0 - wrong
+            // T1->resize({dims[0] + 1});
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);
+            REQUIRE_THROWS(op->computeOutputDims());
+        }
+    }
+}
+} // namespace Aidge