diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml
index a4579e2951ccbafc4335ae428c62eba94c0757e5..8d896c8ec9eb92dd87689d84cad5fc09bf03c4f1 100644
--- a/.gitlab/ci/build.gitlab-ci.yml
+++ b/.gitlab/ci/build.gitlab-ci.yml
@@ -95,60 +95,60 @@ build:ubuntu_python:
     paths:
       - venv/
 
-build:windows_cpp:
-  stage: build
-  needs: []
-  tags:
-    - windows
-
-  image: buildtools
-  before_script:
-    # Install Chocolatey
-    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-    # Install dependencies
-    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-    - choco install git -Y
-    - choco install python -Y
-    # Update PATH
-    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-  script:
-    - mkdir -p build_cpp
-    - mkdir -p install_cpp
-    - cd build_cpp
-    - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug ..
-    - cmake --build . -j2
-    - cmake --install . --config Debug
-
-  artifacts:
-    expire_in: 1 week
-    paths:
-      - build_cpp/
-      - install_cpp/
-
-build:windows_python:
-  stage: build
-  needs: []
-  tags:
-    - windows
-
-  image: buildtools
-  before_script:
-    # Install Chocolatey
-    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-    # Install dependencies
-    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-    - choco install git -Y
-    - choco install python -Y
-    # Update PATH
-    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-  script:
-    - python -m pip install virtualenv
-    - virtualenv venv
-    - venv\Scripts\Activate.ps1
-    # Numpy dependancy for unit test
-    - python -m pip install -r requirements.txt
-    - python -m pip install .
-  artifacts:
-    expire_in: 1 week
-    paths:
-      - venv/
+# build:windows_cpp:
+#   stage: build
+#   needs: []
+#   tags:
+#     - windows
+
+#   image: buildtools
+#   before_script:
+#     # Install Chocolatey
+#     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+#     # Install dependencies
+#     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+#     - choco install git -Y
+#     - choco install python -Y
+#     # Update PATH
+#     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+#   script:
+#     - mkdir -p build_cpp
+#     - mkdir -p install_cpp
+#     - cd build_cpp
+#     - cmake -DCMAKE_INSTALL_PREFIX:PATH=../install_cpp -DCMAKE_BUILD_TYPE=Debug ..
+#     - cmake --build . -j2
+#     - cmake --install . --config Debug
+
+#   artifacts:
+#     expire_in: 1 week
+#     paths:
+#       - build_cpp/
+#       - install_cpp/
+
+# build:windows_python:
+#   stage: build
+#   needs: []
+#   tags:
+#     - windows
+
+#   image: buildtools
+#   before_script:
+#     # Install Chocolatey
+#     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+#     # Install dependencies
+#     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+#     - choco install git -Y
+#     - choco install python -Y
+#     # Update PATH
+#     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+#   script:
+#     - python -m pip install virtualenv
+#     - virtualenv venv
+#     - venv\Scripts\Activate.ps1
+#     # Numpy dependency for unit test
+#     - python -m pip install -r requirements.txt
+#     - python -m pip install .
+#   artifacts:
+#     expire_in: 1 week
+#     paths:
+#       - venv/
diff --git a/.gitlab/ci/test.gitlab-ci.yml b/.gitlab/ci/test.gitlab-ci.yml
index 81e6ca9ac5b868287aa0ef27040c0ead785d3639..abe526cdf3fac882177509cade20e5ed58ed7f77 100644
--- a/.gitlab/ci/test.gitlab-ci.yml
+++ b/.gitlab/ci/test.gitlab-ci.yml
@@ -26,23 +26,23 @@ test:ubuntu_python:
     reports:
       junit: ${CI_PROJECT_NAME}/xmlrunner-results.xml
 
-test:windows_cpp:
-  stage: test
-  needs: ["build:windows_cpp"]
-  tags:
-    - windows
-  image: buildtools
-  before_script:
-    # Install Chocolatey
-    - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
-    # Install dependencies
-    - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
-    - choco install python -Y
-    # Update PATH
-    - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
-  script:
-    - cd build_cpp
-    - ctest --output-junit ctest-results.xml --output-on-failure
-  artifacts:
-    reports:
-      junit: build_cpp/ctest-results.xml
+# test:windows_cpp:
+#   stage: test
+#   needs: ["build:windows_cpp"]
+#   tags:
+#     - windows
+#   image: buildtools
+#   before_script:
+#     # Install Chocolatey
+#     - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
+#     # Install dependencies
+#     - choco install cmake.install --installargs '"ADD_CMAKE_TO_PATH=System"' -Y
+#     - choco install python -Y
+#     # Update PATH
+#     - $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User")
+#   script:
+#     - cd build_cpp
+#     - ctest --output-junit ctest-results.xml --output-on-failure
+#   artifacts:
+#     reports:
+#       junit: build_cpp/ctest-results.xml
diff --git a/CHANGELOG b/CHANGELOG
index 82e90519cc6546e5fa2c2dfa76bc32893d7cad64..dc073620be324b16d904e0b05aec38f128c9b2e9 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,7 @@
+# Version 0.1.1 (January 29, 2024)
+
+[Add] Support of a negative value for Reshape Operator shape attribute.
+
 # Version 0.1.0 (January 23, 2024)
 
 Initial release
diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index 620beb160fb3494f156c1a4b512d386447081154..e4d2cb4faca3dda64cff6aea541c30787c23d0ad 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -39,12 +39,6 @@ class test_attributes(unittest.TestCase):
         self.assertEqual(fc_op.get_attr("OutChannels"), out_channels)
         self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
 
-    def test_matmul(self):
-        in_channels = 4
-        out_channels = 8
-        matmul_op = aidge_core.MatMul(in_channels, out_channels).get_operator()
-        self.assertEqual(matmul_op.get_attr("OutChannels"), out_channels)
-
     def test_producer_1D(self):
         dims = [5]
         producer_op = aidge_core.Producer(dims).get_operator()
diff --git a/aidge_core/unit_tests/test_recipies.py b/aidge_core/unit_tests/test_recipies.py
index 26ae544d6e05f2f9a9da371d3617f9265a037364..cc571d8e5db1beae7fbdb0047c8ae7ced3339fc9 100644
--- a/aidge_core/unit_tests/test_recipies.py
+++ b/aidge_core/unit_tests/test_recipies.py
@@ -45,9 +45,9 @@ class test_recipies(unittest.TestCase):
         self.assertTrue(all([i in old_nodes for i in graph_view.get_nodes()]))
 
     def test_fuse_matmul_add(self):
-        matmul0 = aidge_core.MatMul(1, 1, name="MatMul0")
+        matmul0 = aidge_core.MatMul(name="MatMul0")
         add0 = aidge_core.Add(2, name="Add0")
-        matmul1 = aidge_core.MatMul(1, 1, name="MatMul1")
+        matmul1 = aidge_core.MatMul(name="MatMul1")
         add1 = aidge_core.Add(2, name="Add1")
 
         graph_view = aidge_core.sequential([matmul0, add0, matmul1, add1])
diff --git a/include/aidge/aidge.hpp b/include/aidge/aidge.hpp
index c5b027e70f2153d106fbaccef166d85dbe1efe1f..9e0e457b49fe40b2a6e9e3ce5c5e4b77bee1d93e 100644
--- a/include/aidge/aidge.hpp
+++ b/include/aidge/aidge.hpp
@@ -14,13 +14,15 @@
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/backend/TensorImpl.hpp"
+#include "aidge/backend/StimulusImpl.hpp"
 
 #include "aidge/backend/cpu/data/TensorImpl.hpp"
 #include "aidge/backend/cpu/data/GetCPUPtr.h"
 
 #include "aidge/data/Data.hpp"
 #include "aidge/data/Tensor.hpp"
-
+#include "aidge/data/Database.hpp"
+#include "aidge/data/DataProvider.hpp"
 #include "aidge/graph/Connector.hpp"
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Node.hpp"
@@ -61,6 +63,7 @@
 #include "aidge/operator/Sub.hpp"
 #include "aidge/operator/Transpose.hpp"
 #include "aidge/scheduler/Scheduler.hpp"
+#include "aidge/stimuli/Stimulus.hpp"
 
 #include "aidge/recipies/Recipies.hpp"
 
@@ -69,7 +72,5 @@
 #include "aidge/utils/DynamicAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 #include "aidge/utils/Types.h"
-//#include "aidge/utilsParsing/AstNode.hpp"
-//#include "aidge/utilsParsing/ParsingToken.hpp"
 
 #endif /* AIDGE_IMPORTS_H_ */
diff --git a/include/aidge/backend/StimulusImpl.hpp b/include/aidge/backend/StimulusImpl.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..fbdf57b1587d76160c0cb146b6fe9da6947541dc
--- /dev/null
+++ b/include/aidge/backend/StimulusImpl.hpp
@@ -0,0 +1,32 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_BACKEND_STIMULUSIMPL_H_
+#define AIDGE_CORE_BACKEND_STIMULUSIMPL_H_
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Base class to implement data loading functions.
+ */
+class StimulusImpl {
+public:
+    virtual ~StimulusImpl() noexcept = default;
+
+    virtual std::shared_ptr<Tensor> load() const = 0;
+};
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_BACKEND_STIMULUSIMPL_H_ */
diff --git a/include/aidge/backend/TensorImpl.hpp b/include/aidge/backend/TensorImpl.hpp
index 62f13acb3db81954a4fbb753a3e68e1c5a516402..12551e71cd646564321eeb23f64bf68d77a8886c 100644
--- a/include/aidge/backend/TensorImpl.hpp
+++ b/include/aidge/backend/TensorImpl.hpp
@@ -12,8 +12,12 @@
 #ifndef AIDGE_TENSORIMPL_H_
 #define AIDGE_TENSORIMPL_H_
 
-#include <cstddef>
-#include <cstdio>
+#include <numeric>     // std::accumulate
+#include <cstddef>     // std::size_t
+#include <functional>  // std::multiplies
+#include <vector>
+#include <utility>     // std::pair, std::make_pair
+
 #include "aidge/data/Data.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
@@ -59,20 +63,42 @@ private:
 */
 
 /**
- * This class manages the raw data storage of a Tensor and provide generic copy
+ * @class TensorImpl
+ * @brief Class to manage the raw data storage of a Tensor and provide generic copy
  * primitives from other devices and from/to host.
- * It can own the data or not (use setRawPtr() to set an external data owner).
- * It only knows the data type and data capacity, but does not handle anything else.
+ * @note It can own the data or not (use ``setRawPtr()`` to set an external data owner).
+ * @note It only knows the data type and data capacity, but does not handle anything else.
 */
 class TensorImpl {
+protected:
+
+    const char *mBackend;
+    /// @brief Device id.
+    const DeviceIdx_t mDevice;
+    /// Number of elements (to be) stored.
+    NbElts_t mNbElts;
+
 public:
     TensorImpl() = delete;
-    TensorImpl(const char *backend, DeviceIdx_t device, NbElts_t length) : mBackend(backend), mDevice(device), mNbElts(length) {};
 
+    TensorImpl(const char *backend, DeviceIdx_t device, std::vector<DimSize_t> dims)
+        : mBackend(backend),
+          mDevice(device)
+    {
+        resize(dims);
+    };
+
+    virtual ~TensorImpl() = default;
+
+    virtual bool operator==(const TensorImpl &othImpl) const = 0;
+
+public:
     /**
      * Return the (backend, device) pair for this implementation.
     */
-    std::pair<std::string, DeviceIdx_t> device() const { return std::make_pair(mBackend, mDevice); }
+    std::pair<std::string, DeviceIdx_t> device() const noexcept {
+        return std::make_pair(std::string(mBackend), mDevice);
+    }
 
     /**
      * Copy data from the same device.
@@ -147,8 +173,8 @@ public:
     /**
      * Set the size, in number of elements, that must be stored.
     */
-    void resize(NbElts_t length) {
-        mNbElts = length;
+    virtual void resize(std::vector<DimSize_t> dims) {
+        mNbElts = std::accumulate(dims.cbegin(), dims.cend(), std::size_t(1), std::multiplies<std::size_t>());
     }
 
     /**
@@ -161,23 +187,15 @@ public:
     */
     virtual std::size_t scalarSize() const noexcept = 0;
     constexpr const char *backend() const { return mBackend; }
-    virtual ~TensorImpl() = default;
-    virtual bool operator==(const TensorImpl &othImpl) const = 0;
 
     /**
-     * Copy from another backend.
+     * @brief Copy from another backend.
      * @param srcImpl Source TensorImpl to copy from.
      * @param length Number of elements of size scalarSize() to copy
      * @param srcOffset Source offset (in number of elements).
      * @param dstOffset Destination offset (in number of elements).
     */
     void copyFrom(const TensorImpl& srcImpl, NbElts_t length, NbElts_t srcOffset = 0, NbElts_t dstOffset = 0);
-
-protected:
-    const char *mBackend;
-    const DeviceIdx_t mDevice;
-    /// Number of elements (to be) stored
-    NbElts_t mNbElts;
 };
 
 } // namespace Aidge
diff --git a/include/aidge/backend/cpu/data/GetCPUPtr.h b/include/aidge/backend/cpu/data/GetCPUPtr.h
index 47e3b07e8fa08cdcd714745a9a49bb03e30f79f5..b3e0fd967457585e7e3719f92dde7d6d93eee903 100644
--- a/include/aidge/backend/cpu/data/GetCPUPtr.h
+++ b/include/aidge/backend/cpu/data/GetCPUPtr.h
@@ -12,13 +12,16 @@
 #ifndef AIDGE_CPU_DATA_GETCPUPTR_H_
 #define AIDGE_CPU_DATA_GETCPUPTR_H_
 
+#include <cstddef>
+#include <memory>
+
 #include "aidge/data/Tensor.hpp"
 
 namespace Aidge {
-inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data) {
+inline void *getCPUPtr(std::shared_ptr<Aidge::Data> const &data, const std::size_t offset = 0) {
   const auto tensor = std::static_pointer_cast<Tensor>(data);
-  return tensor->getImpl()->hostPtr(tensor->getImplOffset());
+  return tensor->getImpl()->hostPtr(tensor->getImplOffset() + offset);
 }
 } // namespace Aidge
 
-#endif // AIDGE_CPU_DATA_GETCPUPTR_H_
\ No newline at end of file
+#endif // AIDGE_CPU_DATA_GETCPUPTR_H_
diff --git a/include/aidge/backend/cpu/data/TensorImpl.hpp b/include/aidge/backend/cpu/data/TensorImpl.hpp
index 46dfae3d53b4b201507290bd538ea13737919c3e..78efc4a29f5aef4395b556d23d99da7609ff762c 100644
--- a/include/aidge/backend/cpu/data/TensorImpl.hpp
+++ b/include/aidge/backend/cpu/data/TensorImpl.hpp
@@ -33,7 +33,7 @@ private:
 public:
     static constexpr const char *Backend = "cpu";
 
-    TensorImpl_cpu(DeviceIdx_t device, NbElts_t length) : TensorImpl(Backend, device, length) {}
+    TensorImpl_cpu(DeviceIdx_t device, std::vector<DimSize_t> dims) : TensorImpl(Backend, device, dims) {}
 
     bool operator==(const TensorImpl &otherImpl) const override final {
         const auto& typedOtherImpl = reinterpret_cast<const TensorImpl_cpu<T> &>(otherImpl);
@@ -47,8 +47,8 @@ public:
         return i == mNbElts;
     }
 
-    static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, NbElts_t length) {
-        return std::make_shared<TensorImpl_cpu<T>>(device, length);
+    static std::shared_ptr<TensorImpl_cpu> create(DeviceIdx_t device, std::vector<DimSize_t> dims) {
+        return std::make_shared<TensorImpl_cpu<T>>(device, dims);
     }
 
     inline std::size_t scalarSize() const noexcept override final { return sizeof(T); }
@@ -183,10 +183,18 @@ static Registrar<Tensor> registrarTensorImpl_cpu_Float32(
         {"cpu", DataType::Float32}, Aidge::TensorImpl_cpu<float>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Float16(
         {"cpu", DataType::Float16}, Aidge::TensorImpl_cpu<half_float::half>::create);
-static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
-        {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
 static Registrar<Tensor> registrarTensorImpl_cpu_Int64(
         {"cpu", DataType::Int64}, Aidge::TensorImpl_cpu<long>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int32(
+        {"cpu", DataType::Int32}, Aidge::TensorImpl_cpu<int>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int16(
+        {"cpu", DataType::Int16}, Aidge::TensorImpl_cpu<int16_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt16(
+        {"cpu", DataType::UInt16}, Aidge::TensorImpl_cpu<uint16_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_Int8(
+        {"cpu", DataType::Int8}, Aidge::TensorImpl_cpu<int8_t>::create);
+static Registrar<Tensor> registrarTensorImpl_cpu_UInt8(
+        {"cpu", DataType::UInt8}, Aidge::TensorImpl_cpu<uint8_t>::create);
 }  // namespace
 }  // namespace Aidge
 
diff --git a/include/aidge/data/DataProvider.hpp b/include/aidge/data/DataProvider.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..5c7a1c73ce4ad4eb512a446879cb1ad9b673eb2f
--- /dev/null
+++ b/include/aidge/data/DataProvider.hpp
@@ -0,0 +1,64 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_DATA_DATAPROVIDER_H_
+#define AIDGE_CORE_DATA_DATAPROVIDER_H_
+
+#include <cstddef>  // std::size_t
+#include <memory>   // std::shared_ptr
+#include <string>
+#include <vector>   // std::vector
+
+#include "aidge/data/Database.hpp"
+#include "aidge/data/Data.hpp"
+
+
+
+namespace Aidge {
+
+/**
+ * @brief Data Provider. Takes in a database and composes batches by fetching data from the given database.
+ * @todo Implement Drop last batch option. Currently returns the last batch with less elements in the batch.
+ * @todo Implement readRandomBatch to compose batches from the database with a random sampling strategy. Necessary for training.
+ */
+class DataProvider {
+private:
+    // Dataset providing the data to the dataProvider
+    const Database& mDatabase;
+
+    const std::size_t mNumberModality;
+    std::vector<std::vector<std::size_t>> mDataSizes;
+    std::vector<std::string> mDataBackends;
+    std::vector<DataType> mDataTypes;
+
+    // Desired size of the produced batches
+    const std::size_t mBatchSize;
+
+public:
+    /**
+     * @brief Constructor of Data Provider.
+     * @param database database from which to load the data.
+     * @param batchSize number of data samples per batch.
+     */
+    DataProvider(const Database& database, const std::size_t batchSize);
+
+public:
+    /**
+     * @brief Create a batch for each data modality in the database. The returned batch contains the data as sorted in the database.
+     * @param startIndex the starting index in the database to start the batch from.
+     * @return a vector of tensors. Each tensor is a batch corresponding to one modality.
+     */
+    std::vector<std::shared_ptr<Tensor>> readBatch(const std::size_t startIndex) const;
+};
+
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_DATA_DATAPROVIDER_H_ */
diff --git a/include/aidge/data/Database.hpp b/include/aidge/data/Database.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..edd4b4639fb415dfd723aca987ae754f6d5ccc63
--- /dev/null
+++ b/include/aidge/data/Database.hpp
@@ -0,0 +1,57 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_DATA_DATABASE_H_
+#define AIDGE_CORE_DATA_DATABASE_H_
+
+#include <cstddef>
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+
+namespace Aidge {
+
+/**
+ * @brief Abstract class representing a map from a key to data.
+ * All databases should inherit from this class. All subclasses should overwrite
+ * :cpp:function:`Database::getItem` to fetch data from a given index.
+ */
+class Database {
+public:
+    Database() = default;
+    virtual ~Database() noexcept = default;
+
+    /**
+     * @brief Fetch an item of the database.
+     * @param index index of the item.
+     * @return vector of data mapped to index.
+     */
+    virtual std::vector<std::shared_ptr<Tensor>> getItem(const std::size_t index) const = 0;
+
+    /**
+     * @brief Get the number of items in the database
+     *
+     * @return std::size_t
+     */
+    virtual std::size_t getLen() const noexcept = 0;
+
+    /**
+     * @brief Get the number of modalities in one database item
+     *
+     * @return std::size_t
+     */
+    virtual std::size_t getNbModalities() const noexcept = 0;
+
+};
+} // namespace Aidge
+
+#endif /* AIDGE_CORE_DATA_DATABASE_H_ */
diff --git a/include/aidge/data/Tensor.hpp b/include/aidge/data/Tensor.hpp
index 658c0b497d9753f1bdfd42a274dbb48970cb6d6b..3ccd55d3f19b3cff70c1a100d980ae63213261c5 100644
--- a/include/aidge/data/Tensor.hpp
+++ b/include/aidge/data/Tensor.hpp
@@ -17,6 +17,7 @@
 #include <memory>
 #include <numeric>   // std::accumulate
 #include <string>
+#include <type_traits>  // std::is_arithmetic
 #include <vector>
 
 #include "aidge/backend/TensorImpl.hpp"
@@ -32,7 +33,7 @@ namespace Aidge {
  * Contains a pointer to an actual contiguous implementation of data.
  */
 class Tensor : public Data,
-               public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)> {
+               public Registrable<Tensor, std::tuple<std::string, DataType>, std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)> {
    private:
     DataType mDataType; /** enum to specify data type. */
     std::vector<DimSize_t> mDims; /** Dimensions of the tensor. */
@@ -59,11 +60,25 @@ class Tensor : public Data,
         // ctor
     }
 
+    /**
+     * @brief Construct a new Tensor object from dimensions.
+     *
+     * @param dims dimensions of the tensor
+     * @param dataType datatype of the tensor (default = DataType::Float32)
+     */
+    Tensor(const std::vector<DimSize_t>& dims, DataType dataType = DataType::Float32)
+        : Data(Type),
+          mDataType(dataType),
+          mDims(dims)
+    {
+        computeSize();
+    }
+
     /**
      * @brief Construct a new Tensor object from another one (shallow copy).
      * Data memory is not copied, but shared between the new Tensor and the
      * initial one.
-     * 
+     *
      * @param otherTensor
      */
     Tensor(const Tensor&)            = default;
@@ -78,13 +93,24 @@ class Tensor : public Data,
             newTensor.makeContiguous();
         }
         else {
-            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mSize);
+            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
             newImpl->copy(mImpl->rawPtr(mImplOffset), mSize);
             newTensor.setImpl(newImpl);
         }
         return newTensor;
     }
 
+    template<typename T,
+            typename VT = std::enable_if_t<std::is_arithmetic<T>::value, std::decay_t<T>>>
+    Tensor(T val)
+        : Data(Type),
+          mDataType(NativeType<VT>::type),
+          mDims({}), mStrides({1}),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<VT>::type})(0, std::vector<std::size_t>())),
+          mSize(1) {
+        *static_cast<VT*>(mImpl->rawPtr()) = static_cast<VT>(val);
+    }
+
     /**
      * @brief Construct a new Tensor object from the 1-dimension Array helper.
      * @tparam T datatype
@@ -96,7 +122,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0}),
           mStrides({1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0})),
           mSize(SIZE_0) {
         mImpl->copyFromHost(&arr.data[0], SIZE_0);
     }
@@ -105,7 +131,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array1D<T, SIZE_0> &&arr) {
         resize({SIZE_0});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0});
         }
         mImpl->copyFromHost(&arr.data[0], SIZE_0, mImplOffset);
         return *this;
@@ -123,7 +149,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0, SIZE_1}),
           mStrides({SIZE_1, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1})),
           mSize(SIZE_0 * SIZE_1) {
         mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1);
     }
@@ -132,7 +158,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array2D<T, SIZE_0, SIZE_1> &&arr) {
         resize({SIZE_0, SIZE_1});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1});
         }
         mImpl->copyFromHost(&arr.data[0][0], SIZE_0 * SIZE_1, mImplOffset);
         return *this;
@@ -151,7 +177,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0, SIZE_1, SIZE_2}),
           mStrides({SIZE_1 * SIZE_2, SIZE_2, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2})),
           mSize(SIZE_0 * SIZE_1 * SIZE_2) {
         mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2);
     }
@@ -160,7 +186,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array3D<T, SIZE_0, SIZE_1, SIZE_2> &&arr) {
         resize({SIZE_0, SIZE_1, SIZE_2});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2});
         }
         mImpl->copyFromHost(&arr.data[0][0][0], SIZE_0 * SIZE_1 * SIZE_2, mImplOffset);
         return *this;
@@ -180,7 +206,7 @@ class Tensor : public Data,
           mDataType(NativeType<T>::type),
           mDims({SIZE_0, SIZE_1, SIZE_2, SIZE_3}),
           mStrides({SIZE_1 * SIZE_2 * SIZE_3, SIZE_2 * SIZE_3, SIZE_3, 1}),
-          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3)),
+          mImpl(Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3})),
           mSize(SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3) {
         mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
     }
@@ -189,7 +215,7 @@ class Tensor : public Data,
     constexpr Tensor &operator=(Array4D<T, SIZE_0, SIZE_1, SIZE_2, SIZE_3> &&arr) {
         resize({SIZE_0, SIZE_1, SIZE_2, SIZE_3});
         if (!mImpl) {
-            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3);
+            mImpl = Registrar<Tensor>::create({"cpu", NativeType<T>::type})(0, {SIZE_0, SIZE_1, SIZE_2, SIZE_3});
         }
         mImpl->copyFromHost(&arr.data[0][0][0][0], SIZE_0 * SIZE_1 * SIZE_2 * SIZE_3, mImplOffset);
         return *this;
@@ -250,7 +276,7 @@ class Tensor : public Data,
             if (mImpl->device() != std::make_pair(name, device)) {
                 // Backend change: create new impl, copy from old to new and replace
                 // impl
-                std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mImpl->size());
+                std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
                 if (copyFrom) {
                     newImpl->copyFrom(*mImpl, mImpl->size(), mImplOffset, 0);
                 }
@@ -258,7 +284,7 @@ class Tensor : public Data,
             }
         }
         else {
-            mImpl = Registrar<Tensor>::create({name, mDataType})(device, mSize);
+            mImpl = Registrar<Tensor>::create({name, mDataType})(device, mDims);
         }
     }
 
@@ -277,7 +303,7 @@ class Tensor : public Data,
      * @brief Get the data type enum.
      * @return constexpr DataType
      */
-    constexpr DataType dataType() const { return mDataType; }
+    constexpr DataType dataType() const noexcept { return mDataType; }
 
     /**
      * @brief Set the DataType of the Tensor and converts data
@@ -288,7 +314,7 @@ class Tensor : public Data,
      */
     void setDataType(const DataType dt, bool copyCast = true) {
         if (mImpl && (dataType() != dt)) {
-            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(mImpl->device().second, mImpl->size());
+            std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), dt})(mImpl->device().second, mDims);
             if (copyCast) {
                 newImpl->copyCast(mImpl->rawPtr(mImplOffset), mDataType, mImpl->size());
             }
@@ -306,7 +332,7 @@ class Tensor : public Data,
 
     /**
      * @brief Set the Impl object
-     * 
+     *
      * @param impl New impl shared pointer
      * @param implOffset Storage offset in this new impl for this Tensor
      */
@@ -320,7 +346,7 @@ class Tensor : public Data,
      * @return true
      * @return false
      */
-    bool hasImpl() const { return (mImpl) ? true : false; }
+    bool hasImpl() const noexcept { return mImpl ? true : false; }
 
     /**
      * @brief Get number of dimensions of the Tensor.
@@ -355,13 +381,13 @@ class Tensor : public Data,
      * @brief Return true if Tensor is contiguous in memory.
      * @return bool
      */
-    constexpr bool isContiguous() const { return mContiguous; }
+    constexpr bool isContiguous() const noexcept { return mContiguous; }
 
     /**
      * @brief Get the number of elements in the Tensor object.
      * @return constexpr std::size_t
      */
-    constexpr std::size_t size() const { return mSize; }
+    constexpr std::size_t size() const noexcept { return mSize; }
 
     /**
      * @brief Change the dimensions of the Tensor object according to the given argument.
@@ -375,7 +401,7 @@ class Tensor : public Data,
      * @param dims New dimensions
      */
     template <std::array<DimSize_t, 1>::size_type DIM> // deducing std::array size_type and declaring DIM accordingly
-    void resize(const std::array<DimSize_t, DIM> &dims) {
+    inline void resize(const std::array<DimSize_t, DIM> &dims) {
         resize(std::vector<DimSize_t>(dims.begin(), dims.end()));
     }
 
@@ -390,48 +416,7 @@ class Tensor : public Data,
      * @param dims New dimensions
      * @param strides Stride of the tensor (if not specified, "nested" stride is used)
      */
-    void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>()) {
-        bool checkContiguous = true;
-        if (strides.empty()) {
-            strides.resize(dims.size());
-            size_t expectedStride = 1;
-            for (int dim = dims.size() - 1; dim >= 0; --dim) {
-                strides[dim] = expectedStride;
-                expectedStride*= dims[dim];
-            }
-            checkContiguous = false;
-        }
-        else {
-            AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
-        }
-
-        if (mImpl.use_count() > 1) {
-            // Here we could also create a new storage for this tensor in this case
-            // But, is it more likely that the user really wants this, or that he did a mistake?
-            AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
-        }
-        else {
-            mDims = dims;
-            mStrides = strides;
-
-            mContiguous = true;
-            if (checkContiguous) {
-                size_t expectedStride = 1;
-                for (int dim = dims.size() - 1; dim >= 0; --dim) {
-                    if (strides[dim] != expectedStride) {
-                        mContiguous = false;
-                        break;
-                    }
-                    expectedStride*= dims[dim];
-                }
-            }
-
-            computeSize();
-            if (mImpl) {
-                mImpl->resize(mSize);
-            }
-        }
-    }
+    void resize(const std::vector<DimSize_t> &dims, std::vector<DimSize_t> strides = std::vector<DimSize_t>());
 
     /**
      * @brief Return if the Tensor object has at leastone element.
@@ -465,95 +450,7 @@ class Tensor : public Data,
         set<expectedType>(getStorageIdx(coordIdx), value);
     }
 
-
-
-    std::string toString() const {
-        AIDGE_ASSERT(mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) || (mImpl->hostPtr() != nullptr)), "tensor should have a valid host pointer");
-
-        // TODO: move lambda elsewhere?
-        auto ptrToString = [](DataType dt, void* ptr, size_t idx) {
-            switch (dt) {
-            case DataType::Float64:
-                return std::to_string(static_cast<double*>(ptr)[idx]);
-            case DataType::Float32:
-                return std::to_string(static_cast<float*>(ptr)[idx]);
-            case DataType::Float16:
-                return std::to_string(static_cast<half_float::half*>(ptr)[idx]);
-            case DataType::Int8:
-                return std::to_string(static_cast<int8_t*>(ptr)[idx]);
-            case DataType::Int16:
-                return std::to_string(static_cast<int16_t*>(ptr)[idx]);
-            case DataType::Int32:
-                return std::to_string(static_cast<int32_t*>(ptr)[idx]);
-            case DataType::Int64:
-                return std::to_string(static_cast<int64_t*>(ptr)[idx]);
-            case DataType::UInt8:
-                return std::to_string(static_cast<uint8_t*>(ptr)[idx]);
-            case DataType::UInt16:
-                return std::to_string(static_cast<uint16_t*>(ptr)[idx]);
-            case DataType::UInt32:
-                return std::to_string(static_cast<uint32_t*>(ptr)[idx]);
-            case DataType::UInt64:
-                return std::to_string(static_cast<uint64_t*>(ptr)[idx]);
-            default:
-                AIDGE_ASSERT(true, "unsupported type to convert to string");
-            }
-            return std::string("?");  // To make Clang happy
-        };
-
-        if (dims().empty()) { return ptrToString(mDataType, mImpl->hostPtr(), 0); }
-        std::string res;
-        std::size_t dim = 0;
-        std::size_t counter = 0;
-        if (nbDims()>=2) {
-            std::vector<std::size_t> dimVals(nbDims(), 0);
-            res += "{\n";
-            while (counter < mSize) {
-                std::string spaceString = std::string((dim+1)<<1,' ');
-                if (dim < nbDims()-2) {
-                    if (dimVals[dim] == 0) {
-                        res += spaceString + "{\n";
-                        ++dim;
-                    } else if (dimVals[dim] < static_cast<std::size_t>(dims()[dim])) {
-                        res += spaceString + "},\n" + spaceString + "{\n";
-                        ++dim;
-                    } else {
-                        res += spaceString + "}\n";
-                        dimVals[dim--] = 0;
-                        dimVals[dim]++;
-                    }
-                } else {
-                    for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) {
-                        res += spaceString + "{";
-                        for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) {
-                            res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + ",";
-                        }
-                        res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + "}";
-                        if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) {
-                            res += ",";
-                        }
-                        res += "\n";
-                    }
-                    if (dim == 0) {
-                        break;
-                    }
-                    dimVals[dim--] = 0;
-                    dimVals[dim]++;
-                }
-            }
-
-            for(int i = static_cast<int>(dim); i > 0; --i) {
-                res += std::string((dim+1)<<1,' ') + "}\n";
-            }
-        } else {
-            res += "{";
-            for (DimSize_t j = 0; j < dims()[0]; ++j) {
-                res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) + ((j < dims()[0]-1) ? "," : " ");
-            }
-        }
-        res += "}";
-        return res;
-    }
+    std::string toString() const;
 
     inline void print() const { printf("%s\n", toString().c_str()); }
 
@@ -621,7 +518,7 @@ class Tensor : public Data,
     }
 
     /**
-     * Returns a sub-tensor with one or more dimension less.
+     * @brief Returns a sub-tensor with one or more dimension less.
      * For instance, t.extract({1}) on a CHW tensor will return the HW tensor
      * of channel #1.
      * Likewise, t.extract({0, 1}) on a NCHW tensor will return the HW tensor
@@ -631,15 +528,15 @@ class Tensor : public Data,
      * tensor is returned.
      * It current tensor was contiguous, the returned tensor is garanteed to be
      * contiguous as well.
-     * 
+     *
      * @param coordIdx Coordinates of the sub-tensor to extract
      * @return Tensor Sub-tensor.
     */
     Tensor extract(const std::vector<std::size_t>& coordIdx) const;
 
     /**
-     * Returns a sub-tensor at some coordinate and with some dimension.
-     * 
+     * @brief Returns a sub-tensor at some coordinate and with some dimension.
+     *
      * @param coordIdx First coordinates of the sub-tensor to extract
      * @param dims Dimensions of the sub-tensor to extract
      * @return Tensor Sub-tensor.
@@ -647,7 +544,7 @@ class Tensor : public Data,
     Tensor extract(const std::vector<std::size_t>& coordIdx, const std::vector<std::size_t>& dims) const;
 
     /**
-     * Make the tensor's storage contiguous, if it is not already the case.
+     * @brief Make the tensor's storage contiguous, if it is not already the case.
      * If not contiguous, a new memory space is allocated.
     */
     void makeContiguous();
@@ -704,7 +601,7 @@ class Tensor : public Data,
      * The data type, backend and device stay the same.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
-     * will occur if fallback has already been allocated with the right 
+     * will occur if fallback has already been allocated with the right
      * type/size/device.
      * @return Reference to either itself or to fallback.
     */
@@ -782,10 +679,10 @@ class Tensor : public Data,
     }
 
     /**
-     * Return a reference to a Tensor on desired data type and backend/device:
+     * @brief Return a reference to a Tensor on desired data type and backend/device:
      * - itself, if already with the right characteristics;
      * - the provided Tensor, overwritten with the right characteristics.
-     * NOTE: no data is copy-casted. If it was so in a previous refCastFrom() on
+     * @note no data is copy-casted. If it was so in a previous refCastFrom() on
      * the same fallback, it remains valid, otherwise, data is invalid.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
@@ -800,11 +697,11 @@ class Tensor : public Data,
     const Tensor& ref(std::shared_ptr<Tensor>& fallback, const Aidge::DataType& dt, const std::string &backend, DeviceIdx_t device = 0) const;
 
     /**
-     * Return a reference to a Tensor with same characteristics
+     * @brief Return a reference to a Tensor with same characteristics
      * (data type, backend/device) as targetReqs Tensor:
      * - itself, if already with the right characteristics;
      * - the provided Tensor, overwritten with the right characteristics.
-     * NOTE: no data is copy-casted. If it was so in a previous refCastFrom() on
+     * @note no data is copy-casted. If it was so in a previous refCastFrom() on
      * the same fallback, it remains valid, otherwise, data is invalid.
      * @param fallback A shared_ptr to Tensor ready to be overwritten if necessary.
      * The shared_ptr does not need to be initialized. No new memory allocation
@@ -819,7 +716,11 @@ class Tensor : public Data,
     }
 
 private:
-    ///\bug not protected against overflow
+    /**
+     * @brief Compute the number of elements in the Tensor.
+     * @note If dimensions are not empty, they are multiplied to get the total number
+     * of elements. Else, the Tensor represents a scalar and contains a single element.
+     */
     void computeSize() {
         mSize = std::accumulate(mDims.begin(), mDims.end(), DimSize_t(1), std::multiplies<DimSize_t>());
     }
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 813301a144682ba3e99de31ae324ffaedcc5209f..392fb59e65b8b844a091aaa89e7d623986dda85b 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -209,7 +209,7 @@ public:
      * @brief Compute dimensions of input/output Tensors for each Operator of the
      * GraphView object's Nodes.
      */
-    void forwardDims();
+    void forwardDims(const std::vector<std::vector<DimSize_t>> dims = {});
 
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string &backend, DeviceIdx_t device = 0);
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 9aed8299a67ab719141b6fe199ebf3f52fb7d387..97a4ef69bd371e80c4e63303feac5e64197670b3 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -68,13 +68,7 @@ public:
     // }
 
 
-    // void checkDims() const override final {
-    //     assert(outputDimsForwarded());
-    //     for (const auto& in : mInputs) {
-    //         assert(in->dims() == mOutputs[0]->dims());
-    //     }
-    // }
-
+    void computeOutputDims() override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
         mImpl = Registrar<Add_Op>::create(name)(*this);
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 94b755e0fdb0f76d54cd4f046fb8b08dda05b6b2..a033c6920a374003ad869bddbf5641c48fc5f6e2 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -60,7 +60,7 @@ public:
     }
 
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index c966b5f5c1bb4914f3e46f96493da87a6707b1ff..624af6e755d882ca9585ac2e4175f9c3977e4058 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -59,7 +59,7 @@ public:
     // Helper functions that can be used with setComputeOutputDims():
     static const ComputeDimsFunc Identity;
 
-    void setComputeOutputDims(ComputeDimsFunc func) {
+    inline void setComputeOutputDims(ComputeDimsFunc func) {
         mComputeOutputDims = func;
     }
 
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 3d80193be3f669b00e5a138470269e52d0715780..a011c8666bba55eb7254a8efcd432a3f680cd461 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -12,49 +12,32 @@
 #ifndef AIDGE_CORE_OPERATOR_MATMUL_H_
 #define AIDGE_CORE_OPERATOR_MATMUL_H_
 
-#include <array>
-#include <cmath>
-#include <numeric>
 #include <memory>
+#include <string>
 #include <vector>
 
 #include "aidge/utils/Types.h"
 #include "aidge/data/Tensor.hpp"
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
-#include "aidge/operator/Producer.hpp"
-#include "aidge/utils/StaticAttributes.hpp"
 #include "aidge/utils/Registrar.hpp"
 
 namespace Aidge {
-enum class MatMulAttr { OutChannels };
 
 class MatMul_Op : public OperatorTensor,
               public Registrable<MatMul_Op,
                                  std::string,
-                                 std::unique_ptr<OperatorImpl>(const MatMul_Op &)>,
-              public StaticAttributes<MatMulAttr, DimSize_t> {
+                                 std::unique_ptr<OperatorImpl>(const MatMul_Op &)> {
 public:
     static const std::string Type;
 
-    MatMul_Op() = delete;
-
-    using Attributes_ = StaticAttributes<MatMulAttr, DimSize_t>;
-    template <MatMulAttr e> using attr = typename Attributes_::template attr<e>;
-
-    MatMul_Op(DimSize_t out_channels)
-            : OperatorTensor(Type, 1, 1, 1),
-            Attributes_(
-                attr<MatMulAttr::OutChannels>(out_channels))
-    {}
+    MatMul_Op() : OperatorTensor(Type, 2, 0, 1) {}
 
     /**
      * @brief Copy-constructor. Copy the operator attributes and its output tensor(s), but not its input tensors (the new operator has no input associated).
      * @param op Operator to copy.
      */
-    MatMul_Op(const MatMul_Op& op)
-        : OperatorTensor(op),
-          Attributes_(op)
+    MatMul_Op(const MatMul_Op& op) : OperatorTensor(op)
     {
         mImpl = op.mImpl ? Registrar<MatMul_Op>::create(op.mOutputs[0]->getImpl()->backend())(*this) : nullptr;
     }
@@ -63,50 +46,40 @@ public:
      * @brief Clone the operator using its copy-constructor.
      * @see Operator::MatMul_Op
      */
-    std::shared_ptr<Operator> clone() const override {
+    std::shared_ptr<Operator> clone() const override final {
         return std::make_shared<MatMul_Op>(*this);
     }
 
-
-    void computeOutputDims() override final {
-        bool associated = true;
-        for (IOIndex_t i = 0; i < nbInputs(); ++i) {
-            if (!getInput(i)) {
-                AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
-            }
-            associated &= !(getInput(i)->empty());
-        }
-        if (associated) {
-            // <batch, OutChannels>
-            mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<MatMulAttr::OutChannels>()});
-        }
-    }
+    /**
+     * @brief Compute dimensions for the output Tensor following the same rules as
+     * numpy.matmul.
+     * @note - Both inputs are 2-D Tensors: classic matrix multiplication
+     * @note - Either input is N-D with N > 2: it is treated as a stack of matrices residing
+     * in the last two indexes and broadcast accordingly.
+     * @note - First input is 1-D: it is promoted to a matrix by prepending a 1 to its
+     * dimensions (D) -> (1,D). The prepended 1 is removed after computation.
+     * @note - Second input is 1-D: it is promoted to a matrix by appending a 1 to its
+     * dimensions (D) -> (D,1). The appended 1 is removed after computation.
+     */
+    void computeOutputDims() override final;
 
 
-    void setBackend(const std::string& name, DeviceIdx_t device = 0) override {
+    void setBackend(const std::string& name, DeviceIdx_t device = 0) override final {
         mImpl = Registrar<MatMul_Op>::create(name)(*this);
         mOutputs[0]->setBackend(name, device);
     }
 
-    static const std::vector<std::string> getInputsName(){
-        return {"data_input", "weight"};
+    static const std::vector<std::string> getInputsName() {
+        return {"data_input1", "data_input2"};
     }
-    static const std::vector<std::string> getOutputsName(){
+    static const std::vector<std::string> getOutputsName() {
         return {"data_output"};
     }
 };
 
-inline std::shared_ptr<Node> MatMul(DimSize_t inChannels, DimSize_t outChannels, const std::string& name = "") {
-    // FIXME: properly handle default w initialization in every cases
-    auto matmul = std::make_shared<Node>(std::make_shared<MatMul_Op>(outChannels), name);
-    addProducer(matmul, 1, {outChannels, inChannels}, "w");
-    return matmul;
+inline std::shared_ptr<Node> MatMul(const std::string& name = "") {
+    return std::make_shared<Node>(std::make_shared<MatMul_Op>(), name);
 }
 } // namespace Aidge
 
-namespace {
-template <>
-const char *const EnumStrings<Aidge::MatMulAttr>::data[] = {"OutChannels"};
-}
-
 #endif /* AIDGE_CORE_OPERATOR__MATMUL_H_ */
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index 78b2fa5f98c9dae66ae291769f2de08d7805a738..8758021a9c3de1707a96bbfafc21686ded8b7e40 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -62,7 +62,7 @@ public:
     }
 
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
diff --git a/include/aidge/operator/Operator.hpp b/include/aidge/operator/Operator.hpp
index d6ac2561654afa8d14ed2ac66ef1430ecd00ffee..a37c260d22c0d19031e303005ec3c5b800c9d51e 100644
--- a/include/aidge/operator/Operator.hpp
+++ b/include/aidge/operator/Operator.hpp
@@ -118,8 +118,14 @@ public:
      * @brief Set a new OperatorImpl to the Operator
      *
      */
-    void setImpl(std::shared_ptr<OperatorImpl> impl){
-        mImpl = impl;
+    inline void setImpl(std::shared_ptr<OperatorImpl> impl) { mImpl = impl; }
+
+    /**
+     * @brief Get the OperatorImpl of the Operator
+     *
+     */
+    inline std::shared_ptr<OperatorImpl> getImpl() const noexcept {
+        return mImpl;
     }
 
     /**
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index d498cacc7c5b2ddc3269f3ebc77707aead8eb52d..ba8d3d05877f9aa543518fff1d88f4e8a436b712 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -60,7 +60,7 @@ public:
     }
 
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index 52d0118743373c23a4afe4a51d3f22adbe9e6848..5f07cddfa667e7e494defe38a5667332744c3e20 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -12,8 +12,10 @@
 #ifndef AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
 #define AIDGE_CORE_OPERATOR_REDUCEMEAN_H_
 
+#include <algorithm>  // std::for_each
 #include <array>
 #include <cmath>
+#include <cstdint>    // std::int32_t
 #include <numeric>
 #include <vector>
 
@@ -31,18 +33,18 @@ enum class ReduceMeanAttr { Axes, KeepDims };
 template <DimIdx_t DIM>
 class ReduceMean_Op : public OperatorTensor,
                 public Registrable<ReduceMean_Op<DIM>, std::string, std::unique_ptr<OperatorImpl>(const ReduceMean_Op<DIM> &)>,
-                public StaticAttributes<ReduceMeanAttr, std::array<int, DIM>, DimSize_t> {
+                public StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t> {
 
    public:
     static const std::string Type;
 
     ReduceMean_Op() = delete;
 
-    using Attributes_ = StaticAttributes<ReduceMeanAttr, std::array<int, DIM>, DimSize_t>;
+    using Attributes_ = StaticAttributes<ReduceMeanAttr, std::array<std::int32_t, DIM>, DimSize_t>;
     template <ReduceMeanAttr e>
     using attr = typename Attributes_::template attr<e>;
 
-    constexpr ReduceMean_Op(const std::array<int, DIM> &axes, DimSize_t keep_dims)
+    constexpr ReduceMean_Op(const std::array<std::int32_t, DIM> &axes, DimSize_t keep_dims)
         : OperatorTensor(Type, 1, 0, 1),
           Attributes_(attr<ReduceMeanAttr::Axes>(axes),
                       attr<ReduceMeanAttr::KeepDims>(keep_dims)) {}
@@ -67,29 +69,28 @@ class ReduceMean_Op : public OperatorTensor,
     }
 
     void computeOutputDims() override final {
+        if (!getInput(0)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
         if (!getInput(0)->empty()) {
-            std::vector<DimSize_t> outDims;
-            for(std::size_t d=0; d<getInput(0)->dims().size(); ++d)
-            {
-                bool reducedDim =  false;
-                for(std::size_t i=0; i<DIM; ++i)
-                {
-                    int axis_ = this->template getAttr<ReduceMeanAttr::Axes>()[i];
-                    std::size_t axis= axis_>=0? axis_: axis_ + getInput(0)->nbDims();
-                    if(axis == d)
-                    {
-                        reducedDim = true;
-                        break;
-                    }
-                }
-                if(reducedDim)
-                {
-                    if(this->template getAttr<ReduceMeanAttr::KeepDims>())
-                        outDims.push_back(1);
-                }
-                else
-                    outDims.push_back(getInput(0)->dims()[d]);
+            // make Axes attribute positive
+            std::array<std::int32_t, DIM>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
+            std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
+                if (val < 0)
+                    val+=static_cast<std::int32_t>(getInput(0)->nbDims());
+            });
+            std::sort(axes.begin(), axes.end());
+
+            // build output dimensions
+            std::vector<DimSize_t> outDims = getInput(0)->dims();
+            if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
+                std::for_each(axes.begin(), axes.end(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
+            }
+            else {
+                for (auto it = axes.crbegin(); it != axes.crend(); ++it)
+                    outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
             }
+
             if(outDims.size()>0)
                 mOutputs[0]->resize(outDims);
             else
@@ -111,7 +112,7 @@ class ReduceMean_Op : public OperatorTensor,
 };
 
 template <std::array<DimSize_t, 1>::size_type DIM>
-inline std::shared_ptr<Node> ReduceMean(const std::array<int, DIM> &axes,
+inline std::shared_ptr<Node> ReduceMean(const std::array<std::int32_t, DIM> &axes,
                                         DimSize_t keep_dims=1,
                                         const std::string& name = "") {
     // FIXME: properly handle default w&b initialization in every cases
@@ -123,7 +124,7 @@ inline std::shared_ptr<Node> ReduceMean(const std::array<int, DIM> &axes,
 // helper with C-style array instead of std::array for kernel_dims to allow automatic template DIM deduction
 template <DimSize_t DIM>
 inline std::shared_ptr<Node> ReduceMean(
-    int const (&axes)[DIM],
+    std::int32_t const (&axes)[DIM],
     DimSize_t keep_dims = 1,
     const std::string& name = "") {
     static_assert(DIM<=MaxDim,"Too many kernel dimensions required by ReduceMean, not supported");
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index ee5efa24dc24ebcd5ad4c45491c968caf691eee9..7d346457ead71724ba05da70b5bdf7ad145cbe0c 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -65,7 +65,7 @@ public:
     }
 
     static const std::vector<std::string> getInputsName(){
-        return {"data_input"};
+        return {"data_input_1", "data_input_2"};
     }
     static const std::vector<std::string> getOutputsName(){
         return {"data_output"};
diff --git a/include/aidge/scheduler/Scheduler.hpp b/include/aidge/scheduler/Scheduler.hpp
index 6dcec5aaa4fa80aefebd538a1728445051ca080e..7a81503c967adce3ee000c36ee2f509901cda9ec 100644
--- a/include/aidge/scheduler/Scheduler.hpp
+++ b/include/aidge/scheduler/Scheduler.hpp
@@ -18,6 +18,8 @@
 #include <string>
 #include <vector>
 
+#include "aidge/data/Tensor.hpp"
+
 namespace Aidge {
 class Node;
 class GraphView;
@@ -49,11 +51,17 @@ public:
         mScheduling.clear();
         mStaticSchedule.clear();
     }
+    /**
+     * @brief Place the data tensors in the data input tensors of the graphView. In case of multiple data input tensors, they are mapped to producers in the order given by the graph.
+     *
+     * @param data data input tensors
+     */
+    void connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data);
 
     /**
      * @brief Run the provided Computational Graph with a batch of data
      */
-    void forward(bool forwardDims = true, bool verbose = false);
+    void forward(bool forwardDims = true, bool verbose = false, std::vector<std::shared_ptr<Aidge::Tensor>> data = {});
 
     /**
      * @brief Save in a Markdown file the order of layers execution.
diff --git a/include/aidge/stimuli/Stimulus.hpp b/include/aidge/stimuli/Stimulus.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..80e7c76d4857f577f30b90588f4c3998be80bdb8
--- /dev/null
+++ b/include/aidge/stimuli/Stimulus.hpp
@@ -0,0 +1,107 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#ifndef AIDGE_CORE_STIMULI_STIMULUS_H_
+#define AIDGE_CORE_STIMULI_STIMULUS_H_
+
+#include <string>
+#include <memory>
+#include <tuple>
+
+#include "aidge/backend/StimulusImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+#include "aidge/utils/Registrar.hpp"
+#include "aidge/utils/ErrorHandling.hpp"
+
+namespace Aidge {
+/**
+ * @brief Stimulus. A class wrapping a data sample. Stimulus has two functioning modes. The first mode enables loading data samples from a dataPath and optionally storing the data in memory. The second mode enables storing a data sample that was already loaded in memory.
+ * @details When Stimulus is used in the first mode, the loading function is determined automatically based on the backend and the file extension.
+ */
+class Stimulus : public Registrable<Stimulus, std::tuple<std::string, std::string>, std::unique_ptr<StimulusImpl>(const std::string&)> {
+private:
+    /// Stimulus data path
+    const std::string mDataPath;
+    const std::string mFileExtension;
+    bool mLoadDataInMemory;
+
+    /// Stimulus data ptr
+    std::shared_ptr<Tensor> mData;
+
+    // Implementation of the Stimulus
+    std::unique_ptr<StimulusImpl> mImpl;
+
+public:
+    Stimulus() = delete;
+
+    /**
+     * @brief Construct a new Stimulus object based on a tensor that is already loaded in memory.
+     *
+     * @param data the data tensor.
+     */
+    Stimulus(const std::shared_ptr<Tensor> data)
+    : mLoadDataInMemory(true),
+      mData(data)
+    {
+        // ctor
+    }
+
+    /**
+     * @brief Construct a new Stimulus object based on a dataPath to load the data.
+     *
+     * @param dataPath path to the data to be loaded.
+     * @param loadDataInMemory when true, keep the data in memory once loaded
+     */
+    Stimulus(const std::string& dataPath, bool loadDataInMemory = false)
+    : mDataPath(dataPath),
+      mFileExtension(dataPath.substr(dataPath.find_last_of(".") + 1)),
+      mLoadDataInMemory(loadDataInMemory)
+    {
+        AIDGE_ASSERT((dataPath.find_last_of(".") !=  std::string::npos), "Cannot find extension");
+    }
+
+    /**
+     * @brief Construct a new Stimulus object copied from another one.
+     * @param otherStimulus
+     */
+    Stimulus(const Stimulus& otherStimulus)
+        : mDataPath(otherStimulus.mDataPath),
+          mFileExtension(otherStimulus.mFileExtension),
+          mLoadDataInMemory(otherStimulus.mLoadDataInMemory),
+          mData(otherStimulus.mData)
+    {
+        if (otherStimulus.mImpl) {
+            mImpl = Registrar<Stimulus>::create({"opencv", mFileExtension})(mDataPath);
+        }
+    }
+
+    virtual ~Stimulus();
+
+public:
+    /**
+     * @brief Set the backend of the load implementation associated with the stimulus
+     * @details Create and initialize an implementation.
+     * @param name name of the backend.
+     */
+    inline void setBackend(const std::string &name) {
+        mImpl = Registrar<Stimulus>::create({name, mFileExtension})(mDataPath);
+    }
+
+    /**
+     * @brief Get the data tensor associated with the stimulus. The data is either loaded from a dataPath or passed from an in-memory tensor.
+     *
+     * @return std::shared_ptr<Tensor> the data tensor.
+     */
+    virtual std::shared_ptr<Tensor> load();
+};
+} // namespace Aidge
+
+#endif // AIDGE_CORE_STIMULI_STIMULUS_H_
diff --git a/python_binding/data/pybind_DataProvider.cpp b/python_binding/data/pybind_DataProvider.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..dfdf188946673c4e2a7ea2dc0829312758d80f96
--- /dev/null
+++ b/python_binding/data/pybind_DataProvider.cpp
@@ -0,0 +1,22 @@
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include "aidge/data/DataProvider.hpp"
+#include "aidge/data/Database.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_DataProvider(py::module& m){
+
+    py::class_<DataProvider, std::shared_ptr<DataProvider>>(m, "DataProvider")
+          .def(py::init<Database&, std::size_t>(), py::arg("database"), py::arg("batchSize"))
+          .def("read_batch", &DataProvider::readBatch, py::arg("start_index"),
+          R"mydelimiter(
+          Return a batch of each data modality.
+
+          :param start_index: Database starting index to read the batch from
+          :type start_index: int
+          )mydelimiter");
+
+}
+}
diff --git a/python_binding/data/pybind_Database.cpp b/python_binding/data/pybind_Database.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..903e692ca3d14d6ae25f0d6f151b1b08d557d924
--- /dev/null
+++ b/python_binding/data/pybind_Database.cpp
@@ -0,0 +1,13 @@
+#include <pybind11/pybind11.h>
+#include "aidge/data/Database.hpp"
+
+namespace py = pybind11;
+namespace Aidge {
+
+void init_Database(py::module& m){
+
+    py::class_<Database, std::shared_ptr<Database>>(m,"Database");
+
+
+}
+}
diff --git a/python_binding/data/pybind_Tensor.cpp b/python_binding/data/pybind_Tensor.cpp
index 9fbf08d0b782b6f39b2bef3d0b3ab918f6789ac0..e07f70eaa7de8dc4daa489ec93c8fd9273559ff2 100644
--- a/python_binding/data/pybind_Tensor.cpp
+++ b/python_binding/data/pybind_Tensor.cpp
@@ -30,7 +30,7 @@ void addCtor(py::class_<Tensor,
                         Data,
                         Registrable<Tensor,
                                     std::tuple<std::string, DataType>,
-                                    std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>>& mTensor){
+                                    std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>>& mTensor){
     mTensor.def(py::init([](
         py::array_t<T, py::array::c_style | py::array::forcecast> b,
         std::string backend = "cpu") {
@@ -60,16 +60,16 @@ void addCtor(py::class_<Tensor,
 void init_Tensor(py::module& m){
     py::class_<Registrable<Tensor,
                            std::tuple<std::string, DataType>,
-                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>,
+                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>,
                std::shared_ptr<Registrable<Tensor,
                                            std::tuple<std::string, DataType>,
-                                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>>>(m,"TensorRegistrable");
+                                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>>>(m,"TensorRegistrable");
 
     py::class_<Tensor, std::shared_ptr<Tensor>,
                Data,
                Registrable<Tensor,
                            std::tuple<std::string, DataType>,
-                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, NbElts_t length)>> pyClassTensor
+                           std::shared_ptr<TensorImpl>(DeviceIdx_t device, std::vector<DimSize_t> dims)>> pyClassTensor
         (m,"Tensor", py::multiple_inheritance(), py::buffer_protocol());
 
     pyClassTensor.def(py::init<>())
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index eb26538a5db1eb40fdcb8a2e409067483d4a7d68..c41d99c1a5b034424da06aa9a6c5ba5c6aabbca3 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -100,7 +100,7 @@ void init_GraphView(py::module& m) {
 
           .def("get_nodes", &GraphView::getNodes)
           .def("get_node", &GraphView::getNode, py::arg("node_name"))
-          .def("forward_dims", &GraphView::forwardDims)
+          .def("forward_dims", &GraphView::forwardDims, py::arg("dims")=std::vector<std::vector<DimSize_t>>())
           .def("compile", &GraphView::compile, py::arg("backend"), py::arg("datatype"), py::arg("device") = 0)
           .def("__call__", &GraphView::operator(), py::arg("connectors"))
           .def("set_datatype", &GraphView::setDataType, py::arg("datatype"))
diff --git a/python_binding/operator/pybind_Matmul.cpp b/python_binding/operator/pybind_Matmul.cpp
index 72bc0f817fd911f1ba0801fc841df05166388b84..d0d7f28d52a9a9899b08d37a0c1a4a8720f2ae20 100644
--- a/python_binding/operator/pybind_Matmul.cpp
+++ b/python_binding/operator/pybind_Matmul.cpp
@@ -19,16 +19,11 @@
 namespace py = pybind11;
 namespace Aidge {
 
-void declare_MatMul(py::module &m) {
-  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, Attributes, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
+void init_MatMul(py::module &m) {
+  py::class_<MatMul_Op, std::shared_ptr<MatMul_Op>, OperatorTensor>(m, "MatMulOp", py::multiple_inheritance())
   .def("get_inputs_name", &MatMul_Op::getInputsName)
-  .def("get_outputs_name", &MatMul_Op::getOutputsName)
-  .def("attributes_name", &MatMul_Op::staticGetAttrsName);
-
-  m.def("MatMul", &MatMul, py::arg("in_channels"), py::arg("out_channels"), py::arg("name") = "");
-}
+  .def("get_outputs_name", &MatMul_Op::getOutputsName);
 
-void init_MatMul(py::module &m) {
-  declare_MatMul(m);
+  m.def("MatMul", &MatMul, py::arg("name") = "");
 }
 } // namespace Aidge
diff --git a/python_binding/pybind_core.cpp b/python_binding/pybind_core.cpp
index e57b06cc5014e7159f5a3e5927aedfefb996cae4..ebf73e85583d3300ce68078dc8236001a4db1c96 100644
--- a/python_binding/pybind_core.cpp
+++ b/python_binding/pybind_core.cpp
@@ -18,6 +18,8 @@ namespace py = pybind11;
 
 namespace Aidge {
 void init_Data(py::module&);
+void init_Database(py::module&);
+void init_DataProvider(py::module&);
 void init_Tensor(py::module&);
 void init_OperatorImpl(py::module&);
 void init_Attributes(py::module&);
@@ -69,6 +71,8 @@ void init_TensorUtils(py::module&);
 
 void init_Aidge(py::module& m){
     init_Data(m);
+    init_Database(m);
+    init_DataProvider(m);
     init_Tensor(m);
 
     init_Node(m);
diff --git a/python_binding/scheduler/pybind_Scheduler.cpp b/python_binding/scheduler/pybind_Scheduler.cpp
index d963b81d501f5cd2faf4f69810c897bb4b4da86d..4eb715e799158a1ead143430f574f98059662666 100644
--- a/python_binding/scheduler/pybind_Scheduler.cpp
+++ b/python_binding/scheduler/pybind_Scheduler.cpp
@@ -13,13 +13,14 @@
 #include <pybind11/stl.h>
 #include "aidge/scheduler/Scheduler.hpp"
 #include "aidge/graph/GraphView.hpp"
+#include "aidge/data/Tensor.hpp"
 
 namespace py = pybind11;
 namespace Aidge {
 void init_Scheduler(py::module& m){
     py::class_<SequentialScheduler, std::shared_ptr<SequentialScheduler>>(m, "SequentialScheduler")
     .def(py::init<std::shared_ptr<GraphView>&>(), py::arg("graph_view"))
-    .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false)
+    .def("forward", &SequentialScheduler::forward, py::arg("forward_dims")=true, py::arg("verbose")=false, py::arg("data")=std::vector<Tensor>())
     .def("save_scheduling_diagram", &SequentialScheduler::saveSchedulingDiagram, py::arg("file_name"))
     .def("resetScheduling", &SequentialScheduler::resetScheduling)
     .def("generate_scheduling", &SequentialScheduler::generateScheduling, py::arg("verbose")=false)
diff --git a/src/data/DataProvider.cpp b/src/data/DataProvider.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..dffb5745d9e324856548387069bcf1d5ff6a7b48
--- /dev/null
+++ b/src/data/DataProvider.cpp
@@ -0,0 +1,85 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <cassert>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <vector>
+
+#include "aidge/data/Database.hpp"
+#include "aidge/data/DataProvider.hpp"
+#include "aidge/data/Tensor.hpp"
+
+
+Aidge::DataProvider::DataProvider(const Aidge::Database& database, const std::size_t batchSize)
+    : mDatabase(database),
+      mNumberModality(database.getItem(0).size()),
+      mBatchSize(batchSize)
+{
+    // Iterating on each data modality in the database
+    // Get the tensor dimensions, datatype and backend of each modality to ensure each data have the same
+    for (const auto& modality : mDatabase.getItem(0)) {
+        mDataSizes.push_back(modality->dims());
+        // assert(std::strcmp(item[i]->getImpl()->backend(), "cpu") == 0 && "DataProvider currently only supports cpu backend tensors");
+        // mDataBackends.push_back(item[i]->getImpl()->backend());
+        mDataTypes.push_back(modality->dataType());
+    }
+}
+
+std::vector<std::shared_ptr<Aidge::Tensor>> Aidge::DataProvider::readBatch(const std::size_t startIndex) const
+{
+    assert((startIndex) <= mDatabase.getLen() && " DataProvider readBatch : database fetch out of bounds");
+
+
+    // Determine the batch size (may differ for the last batch)
+    const std::size_t current_batch_size = ((startIndex + mBatchSize) > mDatabase.getLen()) ?
+                                            mDatabase.getLen()-startIndex :
+                                            mBatchSize;
+
+    // Create batch tensors (dimensions, backends, datatype) for each modality
+    std::vector<std::shared_ptr<Tensor>> batchTensors;
+    auto dataBatchSize = mDataSizes;
+    for (std::size_t i = 0; i < mNumberModality; ++i) {
+        dataBatchSize[i].insert(dataBatchSize[i].begin(), current_batch_size);
+        auto batchData = std::make_shared<Tensor>();
+        batchData->resize(dataBatchSize[i]);
+        // batchData->setBackend(mDataBackends[i]);
+        batchData->setBackend("cpu");
+        batchData->setDataType(mDataTypes[i]);
+        batchTensors.push_back(batchData);
+    }
+
+    // Call each database item and concatenate each data modality in the batch tensors
+    for (std::size_t i = 0; i < current_batch_size; ++i){
+
+        auto dataItem = mDatabase.getItem(startIndex+i);
+        // assert same number of modalities
+        assert(dataItem.size() == mNumberModality && "DataProvider readBatch : item from database have inconsistent number of modality.");
+
+        // Browse each modality in the database item
+        for (std::size_t j = 0; j < mNumberModality; ++j) {
+            auto dataSample = dataItem[j];
+
+            // Assert tensor sizes
+            assert(dataSample->dims() == mDataSizes[j] && "DataProvider readBatch : corrupted Data size");
+
+            // Assert implementation backend
+            // assert(dataSample->getImpl()->backend() == mDataBackends[j] && "DataProvider readBatch : corrupted data backend");
+
+            // Assert DataType
+            assert(dataSample->dataType() == mDataTypes[j] && "DataProvider readBatch : corrupted data DataType");
+
+            // Concatenate into the batch tensor
+            batchTensors[j]->getImpl()->copy(dataSample->getImpl()->rawPtr(), dataSample->size(), i*dataSample->size());
+        }
+    }
+    return batchTensors;
+}
\ No newline at end of file
diff --git a/src/data/Tensor.cpp b/src/data/Tensor.cpp
index d45dee5639a6bc082871e1110657392fb97c15ec..4d8e0dcd7d29b47b7a3591652c6d3002698ab29c 100644
--- a/src/data/Tensor.cpp
+++ b/src/data/Tensor.cpp
@@ -9,10 +9,145 @@
  *
  ********************************************************************************/
 
+#include <vector>
+#include <cstddef>
+
 #include "aidge/data/Tensor.hpp"
 #include "aidge/utils/Types.h"
 #include "aidge/utils/ErrorHandling.hpp"
 
+void Aidge::Tensor::resize(const std::vector<Aidge::DimSize_t> &dims, std::vector<Aidge::DimSize_t> strides) {
+    bool checkContiguous = true;
+    if (strides.empty()) {
+        strides.resize(dims.size());
+        size_t expectedStride = 1;
+        for (int dim = dims.size() - 1; dim >= 0; --dim) {
+            strides[dim] = expectedStride;
+            expectedStride*= dims[dim];
+        }
+        checkContiguous = false;
+    }
+    else {
+        AIDGE_ASSERT(strides.size() == dims.size(), "Number of strides must match number of dims");
+    }
+
+    if (mImpl.use_count() > 1) {
+        // Here we could also create a new storage for this tensor in this case
+        // But, is it more likely that the user really wants this, or that he did a mistake?
+        AIDGE_ASSERT(dims == mDims && strides == mStrides, "Cannot resize Tensor with shared storage");
+    }
+    else {
+        mDims = dims;
+        mStrides = strides;
+
+        mContiguous = true;
+        if (checkContiguous) {
+            std::size_t expectedStride = 1;
+            for (std::size_t i = dims.size()-1; i > 0; --i) {
+                if (strides[i] != expectedStride) {
+                    mContiguous = false;
+                    break;
+                }
+                expectedStride*= dims[i];
+            }
+            mContiguous &= (strides[0] == expectedStride);
+        }
+
+        computeSize();
+        if (mImpl) {
+            mImpl->resize(mDims);
+        }
+    }
+}
+
+std::string Aidge::Tensor::toString() const {
+    AIDGE_ASSERT(mImpl && (dims().empty() || (dims() == std::vector<DimSize_t>({0})) || (mImpl->hostPtr() != nullptr)), "tensor should have a valid host pointer");
+
+    // TODO: move lambda elsewhere?
+    auto ptrToString = [](DataType dt, void* ptr, std::size_t idx) {
+        switch (dt) {
+        case DataType::Float64:
+            return std::to_string(static_cast<double*>(ptr)[idx]);
+        case DataType::Float32:
+            return std::to_string(static_cast<float*>(ptr)[idx]);
+        case DataType::Float16:
+            return std::to_string(static_cast<half_float::half*>(ptr)[idx]);
+        case DataType::Int8:
+            return std::to_string(static_cast<int8_t*>(ptr)[idx]);
+        case DataType::Int16:
+            return std::to_string(static_cast<int16_t*>(ptr)[idx]);
+        case DataType::Int32:
+            return std::to_string(static_cast<int32_t*>(ptr)[idx]);
+        case DataType::Int64:
+            return std::to_string(static_cast<int64_t*>(ptr)[idx]);
+        case DataType::UInt8:
+            return std::to_string(static_cast<uint8_t*>(ptr)[idx]);
+        case DataType::UInt16:
+            return std::to_string(static_cast<uint16_t*>(ptr)[idx]);
+        case DataType::UInt32:
+            return std::to_string(static_cast<uint32_t*>(ptr)[idx]);
+        case DataType::UInt64:
+            return std::to_string(static_cast<uint64_t*>(ptr)[idx]);
+        default:
+            AIDGE_ASSERT(false, "unsupported type to convert to string");
+        }
+        return std::string("?");  // To make Clang happy
+    };
+
+    if (dims().empty()) { return ptrToString(mDataType, mImpl->hostPtr(), 0); }
+    std::string res;
+    std::size_t dim = 0;
+    std::size_t counter = 0;
+    if (nbDims()>=2) {
+        std::vector<std::size_t> dimVals(nbDims(), 0);
+        res += "{\n";
+        while (counter < mSize) {
+            std::string spaceString = std::string((dim+1)<<1,' ');
+            if (dim < nbDims()-2) {
+                if (dimVals[dim] == 0) {
+                    res += spaceString + "{\n";
+                    ++dim;
+                } else if (dimVals[dim] < static_cast<std::size_t>(dims()[dim])) {
+                    res += spaceString + "},\n" + spaceString + "{\n";
+                    ++dim;
+                } else {
+                    res += spaceString + "}\n";
+                    dimVals[dim--] = 0;
+                    dimVals[dim]++;
+                }
+            } else {
+                for (; dimVals[dim] < static_cast<std::size_t>(dims()[dim]); ++dimVals[dim]) {
+                    res += spaceString + "{";
+                    for (DimSize_t j = 0; j < dims()[dim + 1] - 1; ++j) {
+                        res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + ",";
+                    }
+                    res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), counter++) + "}";
+                    if (dimVals[dim] < static_cast<std::size_t>(dims()[dim] - 1)) {
+                        res += ",";
+                    }
+                    res += "\n";
+                }
+                if (dim == 0) {
+                    break;
+                }
+                dimVals[dim--] = 0;
+                dimVals[dim]++;
+            }
+        }
+
+        for(int i = static_cast<int>(dim); i > 0; --i) {
+            res += std::string((dim+1)<<1,' ') + "}\n";
+        }
+    } else {
+        res += "{";
+        for (DimSize_t j = 0; j < dims()[0]; ++j) {
+            res += " " + ptrToString(mDataType, mImpl->hostPtr(mImplOffset), j) + ((j < dims()[0]-1) ? "," : " ");
+        }
+    }
+    res += "}";
+    return res;
+}
+
 Aidge::Tensor Aidge::Tensor::extract(const std::vector<std::size_t>& coordIdx) const {
     AIDGE_ASSERT(isContiguous(), "Tensor must be contiguous");
     AIDGE_ASSERT(coordIdx.size() <= mDims.size(), "Number of coordinates is higher than number of dimensions");
@@ -44,7 +179,7 @@ void Aidge::Tensor::makeContiguous() {
     // Block so that mImpl ref count is 1 for resize()
     {
         // Create a new storage that will be contiguous
-        std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mSize);
+        std::shared_ptr<TensorImpl> newImpl = Registrar<Tensor>::create({mImpl->backend(), mDataType})(mImpl->device().second, mDims);
         // Copy elements from old to new storage
         size_t idx = 0;
         while (idx < mSize) {
@@ -52,7 +187,7 @@ void Aidge::Tensor::makeContiguous() {
 
             // Determine the size of the contiguous chunk
             size_t copySize = 1;
-            while (idx + copySize < mSize && 
+            while (idx + copySize < mSize &&
                 getStorageIdx(getCoord(idx + copySize)) == storageIdx + copySize)
             {
                 ++copySize;
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 968e98e75cc587977eb3033fe7f25936880755a4..a93d9af8a972605b1519e9974971ff9e7ad3ef2f 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -265,10 +265,18 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
     forwardDims();
 }
 
-void Aidge::GraphView::forwardDims() {
+void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims) {
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children informations
+    if (!dims.empty()){
+      AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of dimensions and graph inputs");
+      for (std::size_t i = 0; i < dims.size(); ++i){
+        auto tensor = std::make_shared<Tensor>(dims[i]);
+        mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
+      }
+    }
+
     for (std::shared_ptr<Node> nodePtr : getNodes()) {
         for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
             // assess if the input was not already set and is a Tensor then link it to parent output
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 4e638fd86da487565a89760925e45339213fa8f9..a54302d06059d43336800d81e4d18744b6243785 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -9,8 +9,53 @@
  *
  ********************************************************************************/
 
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
 #include <string>
+#include <vector>
 
 #include "aidge/operator/Add.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
-const std::string Aidge::Add_Op::Type = "Add";
\ No newline at end of file
+const std::string Aidge::Add_Op::Type = "Add";
+
+void Aidge::Add_Op::computeOutputDims() {
+    // check inputs have been associated
+    bool associated = (nbInputs() > 0); // do not compute anything if no input
+    for (IOIndex_t i = 0; i < nbInputs(); ++i) {
+        if (!getInput(i)) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+        }
+        associated &= !(getInput(i)->empty());
+    }
+    if (associated) {
+        std::vector<std::vector<std::size_t>> inputsDims(nbInputs());
+        for (std::size_t i = 0; i < nbInputs(); i++) {
+            inputsDims[i] = getInput(i)->dims();
+        }
+
+        std::size_t outNbDims = 1;
+        for(std::size_t i = 0; i < nbInputs(); ++i) {
+            outNbDims = (inputsDims[i].size() > outNbDims) ? inputsDims[i].size() : outNbDims;
+        }
+
+        std::vector<std::size_t> outDims(outNbDims, 1);
+
+        for (auto it = outDims.rbegin(); it != outDims.rend(); ++it) {
+            for (std::size_t i = 0; i < nbInputs(); ++i) {
+                if(!inputsDims[i].empty()) {
+                    const std::size_t dim = inputsDims[i].back();
+                    inputsDims[i].pop_back();
+                    if (*it == 1) {
+                        *it = dim;
+                    }
+                    else if ((dim != *it) && (dim != 1)) {
+                        AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Add operation");
+                    }
+                }
+            }
+        }
+        mOutputs[0]->resize(outDims);
+    }
+}
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index 85db3ac6ef66c837c86dbece288185deaca88ba6..6b55338f4ab7ac9131231fcced21869274c1bd47 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -9,11 +9,10 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <cstddef>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
 #include <string>
 #include <vector>
-#include <utility>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Div.hpp"
@@ -28,11 +27,27 @@ void Aidge::Div_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }
 
-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // div by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // div elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // div by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Div operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
 }
\ No newline at end of file
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 666ed3921ed1190a91935bd9f38303e23963d912..f48c7ca81d6abd1d5150f54eb7d98bf109307d33 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -9,8 +9,64 @@
  *
  ********************************************************************************/
 
+#include <algorithm>
 #include <string>
+#include <vector>
 
 #include "aidge/operator/MatMul.hpp"
+#include "aidge/utils/Types.h"
+#include "aidge/utils/ErrorHandling.hpp"
 
-const std::string Aidge::MatMul_Op::Type = "MatMul";
\ No newline at end of file
+const std::string Aidge::MatMul_Op::Type = "MatMul";
+
+void Aidge::MatMul_Op::computeOutputDims() {
+    if (!getInput(0) || !getInput(1)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input. Cannot compute output dimensions for MatMul Operator.");
+    }
+    if (getInput(0)->empty() && getInput(1)->empty()) {
+        // both inputs are scalar
+        mOutputs[0]->resize({});
+    }
+    else if (!getInput(0)->empty() && !getInput(1)->empty())
+    {
+        std::vector<std::size_t> dims0 = getInput(0)->dims();
+        std::vector<std::size_t> dims1 = getInput(1)->dims();
+
+        // keep second-to-last dimension of dims0
+        const bool keepDim0 = dims0.size() > 1;
+        // keep last dimension of dims1
+        const bool keepDim1 = dims1.size() > 1;
+
+        if (dims0.size() == 1) {
+            dims0.insert(dims0.cbegin(), 1);
+        }
+        if (dims1.size() == 1) {
+            dims1.push_back(1);
+        }
+        const std::size_t dims_size = std::max(dims0.size(), dims1.size());
+
+
+        if (dims0.size() > dims1.size()) {
+            dims1.insert(dims1.cbegin(), dims0.size() - dims1.size(), std::size_t(1));
+        }
+        else if (dims1.size() > dims0.size()) {
+            dims0.insert(dims0.cbegin(), dims1.size() - dims0.size(), std::size_t(1));
+        }
+
+        AIDGE_ASSERT(dims0[dims_size-1] == dims1[dims_size-2], "Incompatible matrices sizes.");
+
+        std::vector<std::size_t> outDims = std::vector<std::size_t>(dims_size-2, 1);
+        for (std::size_t i = 0; i < dims_size-2; ++i) {
+            AIDGE_ASSERT((dims0[i] == dims1[i]) || (dims0[i] == 1) || (dims1[i] == 1), "Bad vector dimension.");
+            outDims[i] = std::max(dims0[i], dims1[i]);
+        }
+
+        // use keepDim0 instead of dims0.size() because dims0 has been modified
+        if (keepDim0)
+            outDims.push_back(dims0[dims_size-2]);
+        if (keepDim1)
+            outDims.push_back(dims1[dims_size-1]);
+
+        mOutputs[0]->resize(outDims);
+    }
+}
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index bc268263e8a6e2ec7c9944faa31da84dc50c4f53..e6d8b017c0c3e5effb43dd789b569f283154e80d 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -9,10 +9,10 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <cstddef>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
 #include <vector>
-#include <utility>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Mul.hpp"
@@ -27,11 +27,27 @@ void Aidge::Mul_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }
 
-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // mul by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // mul elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // mul by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Mul operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
 }
\ No newline at end of file
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index de1f0c3694f51fbd5b365573f61d3e3e2b9109ff..5e29eae0c0f42e7d566a933e9409766026369dad 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -9,10 +9,10 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <cstddef>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
 #include <vector>
-#include <utility>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Pow.hpp"
@@ -27,11 +27,27 @@ void Aidge::Pow_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }
 
-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // pow by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // pow elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // pow by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Pow operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
 }
\ No newline at end of file
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index 639eaf798c1c2a9a6685e8b8d2c4a2cb00a4b57a..9d933bf6c97348842fae8f405d3e709e68d56916 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -9,10 +9,10 @@
  *
  ********************************************************************************/
 
-#include <cassert>
-#include <cstddef>
+#include <cstddef>    // std::size_t
+#include <stdexcept>  // std::runtime_error
+#include <string>
 #include <vector>
-#include <utility>
 
 #include "aidge/backend/OperatorImpl.hpp"
 #include "aidge/operator/Sub.hpp"
@@ -27,11 +27,27 @@ void Aidge::Sub_Op::computeOutputDims() {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
     }
 
-    if ((!getInput(0)->empty()) &&
-        ((getInput(1)->size() == 1) || // sub by a single value
-        (getInput(1)->size() == getInput(0)->size()) || // sub elem-wise
-        (getInput(1)->nbDims() == 1 && getInput(1)->size() == getInput(0)->dims()[getInput(0)->nbDims()-1]))) // sub by a Tensor with one dimension of output size
-    {
-        mOutputs[0]->resize(getInput(0)->dims());
+    if (!getInput(0)->empty() && !getInput(1)->empty()) {
+
+        const std::vector<std::size_t>& inputsDims0 = getInput(0)->dims();
+        const std::vector<std::size_t>& inputsDims1 = getInput(1)->dims();
+
+        std::vector<std::size_t> outDims = (inputsDims0.size() >= inputsDims1.size()) ? inputsDims0 : inputsDims1;
+        const std::vector<std::size_t>& lowDims = (inputsDims0.size() < inputsDims1.size()) ? inputsDims0 : inputsDims1;
+
+        std::size_t out_id = outDims.size() - 1;
+        std::size_t low_id = lowDims.size() - 1;
+        std::size_t i = 0;
+        while (i++ < lowDims.size()) {
+            if (outDims[out_id] == 1) {
+                outDims[out_id] = lowDims[low_id];
+            }
+            else if ((lowDims[low_id] != 1) && (lowDims[low_id] != outDims[out_id])) {
+            AIDGE_THROW_OR_ABORT(std::runtime_error, "Unsupported Tensor shape for Sub Operation");
+            }
+            --out_id;
+            --low_id;
+        }
+        mOutputs[0]->resize(outDims);
     }
 }
\ No newline at end of file
diff --git a/src/recipies/FuseMulAdd.cpp b/src/recipies/FuseMulAdd.cpp
index 322b1d9a0632b893a912c6225ac5b13d63278f8d..85bfc408f092d9f234265db51a01eff1ab64005b 100644
--- a/src/recipies/FuseMulAdd.cpp
+++ b/src/recipies/FuseMulAdd.cpp
@@ -41,7 +41,19 @@ void Aidge::fuseMulAdd(std::shared_ptr<Aidge::Node> matmulNode, std::shared_ptr<
     AIDGE_ASSERT(matmulNode->getParent(1), "No weight detected to produce the fuseMulAdd recipe.");
 
     std::shared_ptr<Node> weight = matmulNode->getParent(1)->cloneSharedOperators();
-    const DimSize_t outSize = std::dynamic_pointer_cast<MatMul_Op>(matmulNode->getOperator()) -> getAttr<DimSize_t>("OutChannels");
+    // TODO: find another way to get OutChannels for FC operator.
+    // This poor fix supposes that one of Add inputs is a const and has the same outChannels as the output
+    DimSize_t outSize = 0;
+    const auto& op = std::dynamic_pointer_cast<OperatorTensor>(addNode->getOperator());
+    for (size_t i = 0; i < op->nbInputs(); i++)
+    {
+        const auto& inTensor = op->getInput(i);
+        if(inTensor->nbDims() > 0) {
+            outSize = inTensor->dims()[inTensor->nbDims()-1];
+            break;
+        }
+    }
+    AIDGE_ASSERT(outSize, "Couldn't get output number of channels for FC operator.");
 
     // Instanciate FC
     //std::shared_ptr<Node> fc = FC(dim[0], false, "Fc");
diff --git a/src/scheduler/Scheduler.cpp b/src/scheduler/Scheduler.cpp
index 3afbcd0442fd40214687751d50bfc98809bba840..380ff8bf3ebabc1a7f7bf7c6f53d05fe99ab30dd 100644
--- a/src/scheduler/Scheduler.cpp
+++ b/src/scheduler/Scheduler.cpp
@@ -174,8 +174,28 @@ void Aidge::SequentialScheduler::generateScheduling(bool verbose) {
 
 }
 
+void Aidge::SequentialScheduler::connectInputs(std::vector<std::shared_ptr<Aidge::Tensor>> data){
+    // This version of connect inputs only connects tensor inputs in input data producers.
+    auto inputNodes = mGraphView->getOrderedInputs();
+
+    // Assert that the number of input data producers corresponds to the number of data input
+    assert(data.size() == inputNodes.size()  && "Scheduler connectInput error - Inconsistent number of graph inputs and inputs passed to the graph");
+
+    for (std::size_t i = 0; i < data.size(); ++i){
+        // TODO : maybe shallow copy instead of deepcopy
+        inputNodes[i].first->getOperator()->setInput(inputNodes[i].second, data[i]);
+    }
+}
+
+
 // TODO: handle multiple inputs/outputs
-void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose) {
+void Aidge::SequentialScheduler::forward(bool forwardDims, bool verbose, std::vector<std::shared_ptr<Aidge::Tensor>> data) {
+
+    // Collect all data input of the graph (that are producers)
+    if (!data.empty()){
+        connectInputs(data);
+    }
+
     // Forward dims (if allowed)
     if (forwardDims) {mGraphView->forwardDims(); }
 
diff --git a/src/stimuli/Stimulus.cpp b/src/stimuli/Stimulus.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6a91534475f6aaff44d5a2cd4da013434a99f9bf
--- /dev/null
+++ b/src/stimuli/Stimulus.cpp
@@ -0,0 +1,30 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include "aidge/stimuli/Stimulus.hpp"
+
+#include <memory>
+
+#include "aidge/data/Tensor.hpp"
+
+Aidge::Stimulus::~Stimulus() = default;
+
+std::shared_ptr<Aidge::Tensor> Aidge::Stimulus::load() {
+    AIDGE_ASSERT((mImpl!=nullptr || mData!=nullptr), "No load implementation and No stored data");
+
+    if (mLoadDataInMemory){
+        if (mData == nullptr){
+            mData = mImpl->load();
+        }
+        return mData;
+    }
+    return mImpl->load();
+}
\ No newline at end of file
diff --git a/unit_tests/data/Test_TensorImpl.cpp b/unit_tests/data/Test_TensorImpl.cpp
index cfcfb45e3735538c1650cfd990ea85e2333916ad..e734fcd7770483dbcd9f594847ffd4297c071e68 100644
--- a/unit_tests/data/Test_TensorImpl.cpp
+++ b/unit_tests/data/Test_TensorImpl.cpp
@@ -19,7 +19,7 @@
 
 using namespace Aidge;
 
-TEST_CASE("Tensor creation") {
+TEST_CASE("[core/data] Tensor creation") {
   SECTION("from const array") {
     Tensor x = Array3D<int, 2, 2, 2>{{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}};
 
@@ -59,7 +59,34 @@ TEST_CASE("Tensor creation") {
   }
 }
 
-TEST_CASE("Tensor methods") {
+TEST_CASE("Tensor fill") {
+  SECTION("Instantiate batches independently") {
+    // initialization with 0s
+    std::shared_ptr<Tensor> concatenatedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{});
+    //concatenatedTensor->print();
+
+    std::shared_ptr<Tensor> myTensor1 = std::make_shared<Tensor>(Array1D<int, 5>{{1,2,3,4,5}});
+    std::shared_ptr<Tensor> myTensor2 = std::make_shared<Tensor>(Array1D<int, 5>{{6,7,8,9,10}});
+    std::shared_ptr<Tensor> myTensor3 = std::make_shared<Tensor>(Array1D<int, 5>{{11,12,13,14,15}});
+
+    // use copy function from implementation
+    concatenatedTensor->getImpl()->copy(myTensor1->getImpl()->rawPtr(), 5, 0);
+    concatenatedTensor->getImpl()->copy(myTensor2->getImpl()->rawPtr(), 5, 5);
+    concatenatedTensor->getImpl()->copy(myTensor3->getImpl()->rawPtr(), 5, 10);
+    // concatenatedTensor->print();
+
+    std::shared_ptr<Tensor> expectedTensor= std::make_shared<Tensor>(Array2D<int, 3, 5>{
+      {{1,2,3,4,5},
+      {6,7,8,9,10},
+      {11,12,13,14,15}}
+    });
+    // expectedTensor->print();
+
+    REQUIRE(*concatenatedTensor == *expectedTensor);
+  }
+}
+
+TEST_CASE("[core/data] Tensor methods","[Tensor]") {
   Tensor x = Array3D<int, 2, 2, 2>{{
     {{1, 2},
      {3, 4}},
@@ -89,7 +116,7 @@ TEST_CASE("Tensor methods") {
     REQUIRE(y.getImpl() == x.getImpl());
     REQUIRE(approxEq<int>(y, Array1D<int, 2>{{3, 4}}));
     REQUIRE(y.isContiguous());
-    
+
     Tensor y2 = x.extract({0, 1, 1}, {2, 1, 1});
     REQUIRE(y2.getImpl() == x.getImpl());
     REQUIRE(!y2.isContiguous());
diff --git a/unit_tests/graphRegex/Test_GraphRegex.cpp b/unit_tests/graphRegex/Test_GraphRegex.cpp
index 924aac79ea8492f6ea0f2cd4d93676876c5a8331..1330a8e620ae5d49d6ef61257a587b914ffed1cd 100644
--- a/unit_tests/graphRegex/Test_GraphRegex.cpp
+++ b/unit_tests/graphRegex/Test_GraphRegex.cpp
@@ -126,9 +126,9 @@ TEST_CASE("GraphRegexUser") {
     SECTION("Applied Recipes"){
 
       // generate the original GraphView
-        auto matmul0 = MatMul(5, 5, "matmul0");
+        auto matmul0 = MatMul("matmul0");
         auto add0 = Add(2, "add0");
-        auto matmul1 = MatMul(5, 5, "matmul1");
+        auto matmul1 = MatMul("matmul1");
         auto add1 = Add(2, "add1");
 
         auto b0 = Producer({5}, "B0");
@@ -154,7 +154,7 @@ TEST_CASE("GraphRegexUser") {
 
 
         auto g = std::make_shared<GraphView>();
-        g->add({matmul0, add0, matmul1, add1, b0, b1,fl,fc});
+        g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1,fl,fc});
 
         std::shared_ptr<GraphRegex> kitchenBook = std::make_shared<GraphRegex>();
 
diff --git a/unit_tests/operator/Test_Div_Op.cpp b/unit_tests/operator/Test_Div_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e659742c0bd200fa33b598f581cfef7b2f1e432e
--- /dev/null
+++ b/unit_tests/operator/Test_Div_Op.cpp
@@ -0,0 +1,144 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Div.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Div_Op(computeOutputDims)", "[Div][computeOutputDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Div Operator
+    std::shared_ptr<Node> myDiv = Div();
+    auto op = std::static_pointer_cast<OperatorTensor>(myDiv -> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op -> associateInput(0,T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+
+    /**
+     * @todo Special case: scalar not handled yet by
+     * ``OperatorTensor::computeOutputDims()``
+     */
+    // SECTION("Scalar / Scalar") {
+    //     // input_0
+    //     T0->resize({});
+
+    //     // input_1
+    //     T1->resize({});
+
+    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    // }
+    // SECTION("Scalar / +1-D") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_0
+    //     T0->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_1
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T1->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    // SECTION("+1-D / Scalar") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_1
+    //     T1->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_0
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T0->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    SECTION("+1-D / +1-D") {
+        // same size
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+
+        // broadcast
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+
+            // input_1 - wrong
+            // T1->resize({dims[0] + 1});
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);
+            REQUIRE_THROWS(op->computeOutputDims());
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_MatMul_Op.cpp b/unit_tests/operator/Test_MatMul_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6c810e675ad46cc5580bd24e57f7e7dbb84db38f
--- /dev/null
+++ b/unit_tests/operator/Test_MatMul_Op.cpp
@@ -0,0 +1,196 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/MatMul.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] MatMul_Op(computeOutputDims)", "[MatMul][computeOutputDims]") {
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dist(1, 10);
+
+    // Create MatMul Operator
+    std::shared_ptr<Node> myMatMul = MatMul();
+    auto op = std::static_pointer_cast<OperatorTensor>(myMatMul -> getOperator());
+
+    /** @todo Special case of scalar Tensor objects.
+     * Not handled yet.
+    */
+    // SECTION("0-D / 0-D") {
+    //     std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    //     T0->resize({});
+    //     op -> associateInput(0,T0);
+
+    //     // input_1 - right
+    //     std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    //     T1->resize({});
+    //     op -> associateInput(1,T1);
+
+    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE((op->getOutput(0)->dims()).empty());
+
+    //     // input_1 - wrong
+    //     T1->resize({dist(gen)});
+
+    //     REQUIRE_THROWS(op->computeOutputDims());
+    // }
+
+    SECTION("1-D / N-D") {
+        // input_0
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        const std::size_t dim0 = dist(gen);
+        T0->resize({dim0});
+        op -> associateInput(0,T0);
+
+        std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+        op -> associateInput(1,T1);
+
+        SECTION("1-D / 1-D") {
+            // input_1 - right
+            T1->resize({dim0});
+
+            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()).empty());
+
+            // input_1 - wrong
+            T1->resize({dim0+1});
+
+            REQUIRE_THROWS(op -> computeOutputDims());
+        }
+        SECTION("1-D / 2-D") {
+            // input_1 - right
+            const std::size_t dim1 = dist(gen);
+            T1->resize({dim0,dim1});
+
+            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim1}));
+
+            // input_1 - wrong
+            T1->resize({dim0+1,dim1});
+
+            REQUIRE_THROWS(op -> computeOutputDims());
+        }
+        SECTION("1-D / +2-D") {
+            // input_1 - right
+            const std::size_t dim1 = dist(gen);
+            const std::size_t dim2 = dist(gen);
+            const std::size_t dim3 = dist(gen);
+            T1->resize({dim1,dim2,dim0,dim3});
+
+            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim1,dim2,dim3}));
+        }
+    }
+    SECTION("2-D / N-D") {
+        // input_0
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        const std::size_t dim0 = dist(gen);
+        const std::size_t dim1 = dist(gen);
+        T0->resize({dim0,dim1});
+        op -> associateInput(0,T0);
+
+        // input_1
+        std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+        op -> associateInput(1,T1);
+
+        SECTION("2-D / 1-D") {
+            // input_1 - right
+            T1->resize({dim1});
+
+            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0}));
+
+            // input_1 - wrong
+            T1->resize({dim1+1});
+
+            REQUIRE_THROWS(op -> computeOutputDims());
+        }
+        SECTION("2-D / 2-D") {
+            // input_1 - right
+            const std::size_t dim2 = dist(gen);
+            T1->resize({dim1, dim2});
+
+            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim2}));
+
+            // input_1 - wrong
+            T1->resize({dim1+1,dim2});
+
+            REQUIRE_THROWS(op -> computeOutputDims());
+        }
+        SECTION("2-D / +2-D") {
+            // input_1 - right
+            const std::size_t dim2 = dist(gen);
+            const std::size_t dim3 = dist(gen);
+            const std::size_t dim4 = dist(gen);
+            T1->resize({dim3,dim4,dim1, dim2});
+
+            REQUIRE_NOTHROW(op -> computeOutputDims());
+            REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim3,dim4,dim0,dim2}));
+
+            // input_1 - wrong
+            T1->resize({dim3,dim4,dim1+1,dim2});
+
+            REQUIRE_THROWS(op -> computeOutputDims());
+        }
+    }
+    SECTION("+2-D / +2-D") {
+        // input_0
+        std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+        const std::size_t dim0 = dist(gen) + 1;
+        const std::size_t dim1 = 1;
+        const std::size_t dim2 = dist(gen);
+        const std::size_t dim3 = dist(gen);
+        T0->resize({dim0,dim1,dim2,dim3});
+        op -> associateInput(0,T0);
+
+        // input_1
+        std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+        op -> associateInput(1,T1);
+
+        // input_1 - right
+        // 1
+        const std::size_t dim5 = dist(gen);
+        T1->resize({dim0,dim1,dim3,dim5});
+        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1,dim2,dim5}));
+
+        // 2 - input_1 broadcast
+        T1->resize({1,dim1,dim3,dim5});
+        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1,dim2,dim5}));
+
+        // 3 - input_0 broadcast
+        const std::size_t dim1_bigger = dist(gen) + 1;
+        T1->resize({dim0,dim1_bigger,dim3,dim5});
+        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1_bigger,dim2,dim5}));
+
+        // 4 - input_0+input_1 broadcast
+        T1->resize({1,dim1_bigger,dim3,dim5});
+        REQUIRE_NOTHROW(op -> computeOutputDims());
+        REQUIRE(op->getOutput(0)->dims() == std::vector<std::size_t>({dim0,dim1_bigger,dim2,dim5}));
+
+        // input_1 - wrong
+        T1->resize({dim0+1,dim1,dim3,dim5});
+        REQUIRE_THROWS(op -> computeOutputDims());
+    }
+}
+} // namespace Aidge
\ No newline at end of file
diff --git a/unit_tests/operator/Test_Mul_Op.cpp b/unit_tests/operator/Test_Mul_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d3e0c5e086fac9d31db817d628214e95d4e41a32
--- /dev/null
+++ b/unit_tests/operator/Test_Mul_Op.cpp
@@ -0,0 +1,144 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Mul.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Mul_Op(computeOutputDims)", "[Mul][computeOutputDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Mul Operator
+    std::shared_ptr<Node> myMul = Mul();
+    auto op = std::static_pointer_cast<OperatorTensor>(myMul -> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op -> associateInput(0,T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+
+    /**
+     * @todo Special case: scalar not handled yet by
+     * ``OperatorTensor::computeOutputDims()``
+     */
+    // SECTION("Scalar / Scalar") {
+    //     // input_0
+    //     T0->resize({});
+
+    //     // input_1
+    //     T1->resize({});
+
+    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    // }
+    // SECTION("Scalar / +1-D") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_0
+    //     T0->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_1
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T1->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    // SECTION("+1-D / Scalar") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_1
+    //     T1->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_0
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T0->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    SECTION("+1-D / +1-D") {
+        // same size
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+
+        // broadcast
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+
+            // input_1 - wrong
+            // T1->resize({dims[0] + 1});
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);
+            REQUIRE_THROWS(op->computeOutputDims());
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Pow_Op.cpp b/unit_tests/operator/Test_Pow_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c77615c11e99c174707df21560044fdd3b6a3c42
--- /dev/null
+++ b/unit_tests/operator/Test_Pow_Op.cpp
@@ -0,0 +1,144 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Pow.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Pow_Op(computeOutputDims)", "[Pow][computeOutputDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);
+
+    // Create Pow Operator
+    std::shared_ptr<Node> myPow = Pow();
+    auto op = std::static_pointer_cast<OperatorTensor>(myPow -> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op -> associateInput(0,T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+
+    /**
+     * @todo Special case: scalar not handled yet by
+     * ``OperatorTensor::computeOutputDims()``
+     */
+    // SECTION("Scalar / Scalar") {
+    //     // input_0
+    //     T0->resize({});
+
+    //     // input_1
+    //     T1->resize({});
+
+    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    // }
+    // SECTION("Scalar / +1-D") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_0
+    //     T0->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_1
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T1->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    // SECTION("+1-D / Scalar") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_1
+    //     T1->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_0
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T0->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    SECTION("+1-D / +1-D") {
+        // same size
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+
+        // broadcast
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+
+            // input_1 - wrong
+            // T1->resize({dims[0] + 1});
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);
+            REQUIRE_THROWS(op->computeOutputDims());
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/operator/Test_Sub_Op.cpp b/unit_tests/operator/Test_Sub_Op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b7b744410d31ea32dea5a15cc7a29da093488d14
--- /dev/null
+++ b/unit_tests/operator/Test_Sub_Op.cpp
@@ -0,0 +1,144 @@
+/********************************************************************************
+ * Copyright (c) 2023 CEA-List
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0
+ *
+ ********************************************************************************/
+
+#include <catch2/catch_test_macros.hpp>
+#include <cstddef>  // std::size_t
+#include <memory>
+#include <random>   // std::random_device, std::mt19937, std::uniform_int_distribution
+#include <vector>
+
+#include "aidge/data/Tensor.hpp"
+#include "aidge/operator/Sub.hpp"
+#include "aidge/operator/OperatorTensor.hpp"
+
+namespace Aidge {
+TEST_CASE("[core/operator] Sub_Op(computeOutputDims)", "[Sub][computeOutputDims]") {
+    constexpr std::uint16_t NBTRIALS = 10;
+
+    // Create a random number generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<std::size_t> dimsDist(1, 10);  // random dimension sizes in [1, 10]
+    std::uniform_int_distribution<std::size_t> nbDimsDist(1, 5);  // random tensor ranks in [1, 5]
+
+    // Create Sub Operator
+    std::shared_ptr<Node> mySub = Sub();
+    auto op = std::static_pointer_cast<OperatorTensor>(mySub -> getOperator());
+
+    // input_0
+    std::shared_ptr<Tensor> T0 = std::make_shared<Tensor>();
+    op -> associateInput(0,T0);
+    // input_1
+    std::shared_ptr<Tensor> T1 = std::make_shared<Tensor>();
+    op -> associateInput(1,T1);
+
+    /**
+     * @todo Special case: scalar not handled yet by
+     * ``OperatorTensor::computeOutputDims()``
+     */
+    // SECTION("Scalar / Scalar") {
+    //     // input_0
+    //     T0->resize({});
+
+    //     // input_1
+    //     T1->resize({});
+
+    //     REQUIRE_NOTHROW(op->computeOutputDims());
+    //     REQUIRE((op->getOutput(0)->dims() == std::vector<std::size_t>()));
+    // }
+    // SECTION("Scalar / +1-D") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_0
+    //     T0->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_1
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T1->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    // SECTION("+1-D / Scalar") {
+    //     // a scalar is compatible with any other Tensor
+    //     // input_1
+    //     T1->resize({});
+
+    //     for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+
+    //         // input_0
+    //         const std::size_t nb_dims = nbDimsDist(gen);
+    //         std::vector<std::size_t> dims(nb_dims);
+    //         for (std::size_t i = 0; i < nb_dims; ++i) {
+    //             dims[i] = dimsDist(gen);
+    //         }
+    //         T0->resize(dims);
+
+    //         REQUIRE_NOTHROW(op->computeOutputDims());
+    //         REQUIRE((op->getOutput(0)->dims()) == dims);
+    //     }
+    // }
+    SECTION("+1-D / +1-D") {
+        // same size: both inputs share identical dims, so the output must equal them
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 1;
+            }
+
+            T0->resize(dims0);
+            T1->resize(dims0);
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dims0);
+        }
+
+        // broadcast: some dims of input_1 set to 1 and leading dims dropped; output must keep dims0
+        for (std::uint16_t trial = 0; trial < NBTRIALS; ++trial) {
+            const std::size_t nb_dims = nbDimsDist(gen) + 1;
+            std::vector<std::size_t> dims0(nb_dims);
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                dims0[i] = dimsDist(gen) + 2;  // values >= 3, so broadcast 1s and the +1-perturbed dims below stay distinguishable
+            }
+            std::vector<std::size_t> dimsOut = dims0;
+            std::vector<std::size_t> dims1 = dims0;
+            for (std::size_t i = 0; i < nb_dims; ++i) {
+                if (dimsDist(gen) <= 5) {  // ~50% chance to broadcast this dim of input_1
+                    dims1[i] = 1;
+                }
+            }
+            dims1.erase(dims1.cbegin(), dims1.cbegin() + std::min(nbDimsDist(gen), nb_dims-1));  // drop random leading dims (keeps at least one)
+
+            T0->resize(dims0);
+            T1->resize(dims1);
+
+            REQUIRE_NOTHROW(op->computeOutputDims());
+            REQUIRE((op->getOutput(0)->dims()) == dimsOut);
+
+            // input_1 - wrong: every dim incremented, making the shapes incompatible
+            // T1->resize({dims[0] + 1});
+            std::vector<std::size_t> dims1_wrong = dims1;
+            for (std::size_t i = 0; i < dims1.size(); ++i) {
+                ++dims1_wrong[i];
+            }
+            T1->resize(dims1_wrong);
+            REQUIRE(dims0 != dims1_wrong);  // sanity: the perturbed dims really differ from input_0's
+            REQUIRE_THROWS(op->computeOutputDims());
+        }
+    }
+}
+} // namespace Aidge
diff --git a/unit_tests/recipies/Test_FuseMulAdd.cpp b/unit_tests/recipies/Test_FuseMulAdd.cpp
index 968826230dfdf85290ee377aee155e06855c4b28..d0875fe10078eb9d8e3a97e0703191b5697f3fda 100644
--- a/unit_tests/recipies/Test_FuseMulAdd.cpp
+++ b/unit_tests/recipies/Test_FuseMulAdd.cpp
@@ -25,9 +25,9 @@ namespace Aidge {
 
 TEST_CASE("[cpu/recipies] FuseMulAdd", "[FuseMulAdd][recipies]") {
     // generate the original GraphView
-    auto matmul0 = MatMul(5, 5, "matmul0");
+    auto matmul0 = MatMul("matmul0");
     auto add0 = Add(2, "add0");
-    auto matmul1 = MatMul(5, 5, "matmul1");
+    auto matmul1 = MatMul("matmul1");
     auto add1 = Add(2, "add1");
 
     auto b0 = Producer({5}, "B0");
@@ -49,7 +49,7 @@ TEST_CASE("[cpu/recipies] FuseMulAdd", "[FuseMulAdd][recipies]") {
     b1->addChild(add1, 0, 1);
 
     auto g = std::make_shared<GraphView>();
-    g->add({matmul0, add0, matmul1, add1, b0, b1});
+    g->add({w0, matmul0, b0, add0, w1, matmul1, b1, add1});
 
     // Check original graph
     REQUIRE(g->getNodes() ==
diff --git a/version.txt b/version.txt
index 8a9ecc2ea99d607e92feae1656ddbf6fdd82a2c1..17e51c385ea382d4f2ef124b7032c1604845622d 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.0.1
\ No newline at end of file
+0.1.1