From eed37cdeda96babd71f3d675ebb76c58248676f7 Mon Sep 17 00:00:00 2001
From: Olivier BICHLER <olivier.bichler@cea.fr>
Date: Mon, 8 Apr 2024 14:54:34 +0200
Subject: [PATCH] Make forwardDims() optional

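forwardDims() and computeOutputDims() now return a bool telling whether
the output dimensions could actually be computed, and a failure only
logs a warning instead of aborting (circular dependency, missing
inputs, wrong or data-dependent dimensions). An allowDataDependency
flag is propagated down to computeOutputDims() so that operators may
rely on input data when resolving their output dims, and
OperatorTensor::forward() now retries computeOutputDims() right before
execution when the dims were not forwarded beforehand.

Minimal usage sketch (illustrative only; propagateDims and graphView
are placeholder names, not part of this patch):

    #include <memory>
    #include "aidge/graph/GraphView.hpp"

    // Try to propagate dimensions over an already-built graph.
    void propagateDims(const std::shared_ptr<Aidge::GraphView>& graphView) {
        if (!graphView->forwardDims({}, /*allowDataDependency=*/true)) {
            // Some output dims could not be resolved yet: a warning was
            // logged and OperatorTensor::forward() will recompute them
            // just before each operator runs.
        }
    }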
---
 include/aidge/graph/GraphView.hpp             |  2 +-
 include/aidge/operator/Add.hpp                |  2 +-
 include/aidge/operator/AvgPooling.hpp         |  4 +-
 include/aidge/operator/BatchNorm.hpp          |  3 +-
 include/aidge/operator/Concat.hpp             |  2 +-
 include/aidge/operator/Conv.hpp               |  4 +-
 include/aidge/operator/ConvDepthWise.hpp      |  4 +-
 include/aidge/operator/Div.hpp                |  2 +-
 include/aidge/operator/FC.hpp                 |  2 +-
 include/aidge/operator/Gather.hpp             |  2 +-
 include/aidge/operator/GenericOperator.hpp    |  2 +-
 .../aidge/operator/GlobalAveragePooling.hpp   |  2 +-
 include/aidge/operator/Identity.hpp           |  2 +-
 include/aidge/operator/MatMul.hpp             |  2 +-
 include/aidge/operator/MaxPooling.hpp         |  4 +-
 include/aidge/operator/Memorize.hpp           |  2 +-
 include/aidge/operator/MetaOperator.hpp       |  5 +-
 include/aidge/operator/Mul.hpp                |  2 +-
 include/aidge/operator/OperatorTensor.hpp     |  4 +-
 include/aidge/operator/Pad.hpp                |  4 +-
 include/aidge/operator/Pop.hpp                |  2 +-
 include/aidge/operator/Pow.hpp                |  2 +-
 include/aidge/operator/Producer.hpp           |  2 +-
 include/aidge/operator/ReduceMean.hpp         |  2 +-
 include/aidge/operator/Reshape.hpp            |  2 +-
 include/aidge/operator/Slice.hpp              |  2 +-
 include/aidge/operator/Sub.hpp                |  2 +-
 include/aidge/operator/Transpose.hpp          |  4 +-
 python_binding/graph/pybind_GraphView.cpp     |  2 +-
 .../operator/pybind_OperatorTensor.cpp        |  2 +-
 src/graph/GraphView.cpp                       | 10 ++--
 src/operator/Add.cpp                          |  4 +-
 src/operator/Concat.cpp                       |  4 +-
 src/operator/Div.cpp                          |  5 +-
 src/operator/FC.cpp                           |  4 +-
 src/operator/Gather.cpp                       |  5 +-
 src/operator/GenericOperator.cpp              | 10 ++--
 src/operator/GlobalAveragePooling.cpp         | 12 ++---
 src/operator/MatMul.cpp                       |  6 ++-
 src/operator/Memorize.cpp                     |  6 ++-
 src/operator/Mul.cpp                          |  5 +-
 src/operator/OperatorTensor.cpp               | 14 ++++-
 src/operator/Pop.cpp                          |  5 +-
 src/operator/Pow.cpp                          |  5 +-
 src/operator/ReduceMean.cpp                   | 51 ++++++++++---------
 src/operator/Reshape.cpp                      |  5 +-
 src/operator/Slice.cpp                        |  3 +-
 src/operator/Sub.cpp                          |  5 +-
 48 files changed, 152 insertions(+), 85 deletions(-)

diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 845599fd3..59c538ce6 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -210,7 +210,7 @@ public:
      * @brief Compute dimensions of input/output Tensors for each Operator of the
      * GraphView object's Nodes.
      */
-    void forwardDims(const std::vector<std::vector<DimSize_t>> dims = {});
+    bool forwardDims(const std::vector<std::vector<DimSize_t>> dims = {}, bool allowDataDependency = false);
 
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const;
diff --git a/include/aidge/operator/Add.hpp b/include/aidge/operator/Add.hpp
index 93cfb4451..249303620 100644
--- a/include/aidge/operator/Add.hpp
+++ b/include/aidge/operator/Add.hpp
@@ -60,7 +60,7 @@ public:
     // }
 
 
-    void computeOutputDims() override final;
+    bool computeOutputDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/AvgPooling.hpp b/include/aidge/operator/AvgPooling.hpp
index 031046500..f9d7454f5 100644
--- a/include/aidge/operator/AvgPooling.hpp
+++ b/include/aidge/operator/AvgPooling.hpp
@@ -80,7 +80,7 @@ public:
     }
 
 
-    void computeOutputDims() override final {
+    bool computeOutputDims(bool /*allowDataDependency*/ = false) override final {
         // check inputs have been associated
         if (!getInput(0)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
@@ -98,7 +98,9 @@ public:
                                             static_cast<float>(this->template getAttr<AvgPoolingAttr::StrideDims>()[dim])));
             }
             getOutput(0)->resize(outputDims);
+            return true;
         }
+        return false;
     }
 
 
diff --git a/include/aidge/operator/BatchNorm.hpp b/include/aidge/operator/BatchNorm.hpp
index 51673dd3c..9a9db80e9 100644
--- a/include/aidge/operator/BatchNorm.hpp
+++ b/include/aidge/operator/BatchNorm.hpp
@@ -79,7 +79,7 @@ public:
     // }
 
 
-    void computeOutputDims() override final {
+    bool computeOutputDims(bool /*allowDataDependency*/ = false) override final {
         // check inputs have been associated
         bool associated = true;
         for (IOIndex_t i = 0; i < nbInputs(); ++i) {
@@ -96,6 +96,7 @@ public:
             }
             mOutputs[0]->resize(getInput(0)->dims());
         }
+        return associated;
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
diff --git a/include/aidge/operator/Concat.hpp b/include/aidge/operator/Concat.hpp
index 611ff6bd5..97c477db5 100644
--- a/include/aidge/operator/Concat.hpp
+++ b/include/aidge/operator/Concat.hpp
@@ -70,7 +70,7 @@ public:
         return std::make_shared<Concat_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool computeOutputDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/Conv.hpp b/include/aidge/operator/Conv.hpp
index c93a09810..45925691b 100644
--- a/include/aidge/operator/Conv.hpp
+++ b/include/aidge/operator/Conv.hpp
@@ -108,7 +108,7 @@ public:
 
     // }
 
-    void computeOutputDims() override final {
+    bool computeOutputDims(bool /*allowDataDependency*/ = false) override final {
         // check inputs have been associated
         bool associated = true;
         for (IOIndex_t i = 0; i < 3; ++i) {
@@ -135,6 +135,8 @@ public:
             outputDims[0] = inputDims[0];
             mOutputs[0]->resize(outputDims);
         }
+
+        return associated;
     }
 
     std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>>
diff --git a/include/aidge/operator/ConvDepthWise.hpp b/include/aidge/operator/ConvDepthWise.hpp
index 559c0fc7a..8ffe18c04 100644
--- a/include/aidge/operator/ConvDepthWise.hpp
+++ b/include/aidge/operator/ConvDepthWise.hpp
@@ -90,7 +90,7 @@ public:
     }
 
 
-    void computeOutputDims() override final {
+    bool computeOutputDims(bool /*allowDataDependency*/ = false) override final {
         // check inputs have been associated
         // TODO : add a check of inputs dimensions ?
         bool associated = true;
@@ -124,6 +124,8 @@ public:
             outputDims[0] = inputDims[0];
             mOutputs[0]->resize(outputDims);
         }
+
+        return associated;
     }
 
     std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const override {
diff --git a/include/aidge/operator/Div.hpp b/include/aidge/operator/Div.hpp
index 49410db04..043422ae2 100644
--- a/include/aidge/operator/Div.hpp
+++ b/include/aidge/operator/Div.hpp
@@ -54,7 +54,7 @@ public:
         return std::make_shared<Div_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool computeOutputDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 39b28c125..323dbc560 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -71,7 +71,7 @@ public:
 
     void associateInput(const IOIndex_t inputIdx, const std::shared_ptr<Data>& data) override final;
 
-    void computeOutputDims() override final;
+    bool computeOutputDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index b7d18e644..7101a2f19 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -71,7 +71,7 @@ public:
         return std::make_shared<Gather_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool computeOutputDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/GenericOperator.hpp b/include/aidge/operator/GenericOperator.hpp
index e7d60285b..6208ea0a9 100644
--- a/include/aidge/operator/GenericOperator.hpp
+++ b/include/aidge/operator/GenericOperator.hpp
@@ -61,7 +61,7 @@ public:
     }
 
 public:
-    void computeOutputDims() override final;
+    bool computeOutputDims(bool allowDataDependency = false) override final;
 
     bool outputDimsForwarded() const override final;
 
diff --git a/include/aidge/operator/GlobalAveragePooling.hpp b/include/aidge/operator/GlobalAveragePooling.hpp
index 12c8eb02d..1552d0e08 100644
--- a/include/aidge/operator/GlobalAveragePooling.hpp
+++ b/include/aidge/operator/GlobalAveragePooling.hpp
@@ -52,7 +52,7 @@ public:
     return std::make_shared<GlobalAveragePooling_Op>(*this);
   }
 
-  void computeOutputDims() override final;
+  bool computeOutputDims(bool allowDataDependency = false) override final;
 
   void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
diff --git a/include/aidge/operator/Identity.hpp b/include/aidge/operator/Identity.hpp
index 27432bc5b..08634d9fa 100644
--- a/include/aidge/operator/Identity.hpp
+++ b/include/aidge/operator/Identity.hpp
@@ -63,7 +63,7 @@ public:
         return std::make_shared<Identity_Op>(*this);
     }
 
-    void computeOutputDims() override final {} // Do nothing
+    bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { return true; } // Do nothing
 
     /**
      * @brief Check if output dimensions have been computed.
diff --git a/include/aidge/operator/MatMul.hpp b/include/aidge/operator/MatMul.hpp
index 43bd8b165..6f7ac2348 100644
--- a/include/aidge/operator/MatMul.hpp
+++ b/include/aidge/operator/MatMul.hpp
@@ -64,7 +64,7 @@ public:
      * @note - Second input is 1-D: it is promoted to a matrix by appending a 1 to its
      * dimensions (D) -> (D,1). The appended 1 is removed after computation.
      */
-    void computeOutputDims() override final;
+    bool computeOutputDims(bool allowDataDependency = false) override final;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
diff --git a/include/aidge/operator/MaxPooling.hpp b/include/aidge/operator/MaxPooling.hpp
index 5b09aa02c..54eeccef7 100644
--- a/include/aidge/operator/MaxPooling.hpp
+++ b/include/aidge/operator/MaxPooling.hpp
@@ -84,7 +84,7 @@ public:
     }
 
 
-    void computeOutputDims() override final {
+    bool computeOutputDims(bool /*allowDataDependency*/ = false) override final {
         if (!getInput(0)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
         }
@@ -108,7 +108,9 @@ public:
             outputDims[1] = inputDims[1];
             outputDims[0] = inputDims[0];
             mOutputs[0]->resize(outputDims);
+            return true;
         }
+        return false;
     }
 
 
diff --git a/include/aidge/operator/Memorize.hpp b/include/aidge/operator/Memorize.hpp
index 7de34563a..89d265283 100644
--- a/include/aidge/operator/Memorize.hpp
+++ b/include/aidge/operator/Memorize.hpp
@@ -73,7 +73,7 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
-    void computeOutputDims() override;
+    bool computeOutputDims(bool allowDataDependency = false) override final;
     bool outputDimsForwarded() const override;
     void updateConsummerProducer() override;
     void forward() override;
diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index 5ac9cf3c9..44c52d9eb 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -81,7 +81,7 @@ public:
         mInputs[inputIdx] = std::dynamic_pointer_cast<Tensor>(data);
     }
 
-    void computeOutputDims() override final {
+    bool computeOutputDims(bool allowDataDependency = false) override final {
         // Check first that all required inputs are available, otherwise
         // mGraph->forwardDims() will fail!
         bool forwarded = true;
@@ -91,8 +91,9 @@ public:
 
         if (forwarded) {
             // Forward dims of micro-graph
-            mGraph->forwardDims();
+            return mGraph->forwardDims({}, allowDataDependency);
         }
+        return false;
     }
 
 
diff --git a/include/aidge/operator/Mul.hpp b/include/aidge/operator/Mul.hpp
index cc9fba594..1ba0f5405 100644
--- a/include/aidge/operator/Mul.hpp
+++ b/include/aidge/operator/Mul.hpp
@@ -57,7 +57,7 @@ public:
         return std::make_shared<Mul_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool computeOutputDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override;
 
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index adf45c2d8..d6d1d693b 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -80,11 +80,13 @@ public:
      * For each dataInput Tensor of the Operator, the first index and dimensions of the feature area.
      */
     virtual std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<DimSize_t>>> computeReceptiveField(const std::vector<DimSize_t>& firstEltDims, const std::vector<DimSize_t>& outputDims, const IOIndex_t outputIdx = 0) const;
-    virtual void computeOutputDims();
+    virtual bool computeOutputDims(bool allowDataDependency = false);
     virtual bool outputDimsForwarded() const;
     ///////////////////////////////////////////////////
 
     virtual void setDataType(const DataType& dataType) const override;
+
+    virtual void forward();
 };
 }  // namespace Aidge
 
diff --git a/include/aidge/operator/Pad.hpp b/include/aidge/operator/Pad.hpp
index dce2a6e9e..1201cf18c 100644
--- a/include/aidge/operator/Pad.hpp
+++ b/include/aidge/operator/Pad.hpp
@@ -74,7 +74,7 @@ public:
     }
 
 
-    void computeOutputDims() override final {
+    bool computeOutputDims(bool /*allowDataDependency*/ = false) override final {
         bool associated = true;
         for (IOIndex_t i = 0; i < nbInputs(); ++i) {
             if (!getInput(i)) {
@@ -95,6 +95,8 @@ public:
             outputDims[0] = inputDims[0];
             mOutputs[0]->resize(outputDims);
         }
+
+        return associated;
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
diff --git a/include/aidge/operator/Pop.hpp b/include/aidge/operator/Pop.hpp
index 9109ccaeb..c584390ca 100644
--- a/include/aidge/operator/Pop.hpp
+++ b/include/aidge/operator/Pop.hpp
@@ -66,7 +66,7 @@ public:
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
-    void computeOutputDims() override final;
+    bool computeOutputDims(bool allowDataDependency = false) override final;
     void updateConsummerProducer() override;
     void forward() override;
 
diff --git a/include/aidge/operator/Pow.hpp b/include/aidge/operator/Pow.hpp
index f2becdc60..b83cf15d6 100644
--- a/include/aidge/operator/Pow.hpp
+++ b/include/aidge/operator/Pow.hpp
@@ -53,7 +53,7 @@ public:
         return std::make_shared<Pow_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool computeOutputDims(bool allowDataDependency = false) override final;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
diff --git a/include/aidge/operator/Producer.hpp b/include/aidge/operator/Producer.hpp
index 1e5a3940b..79a116e4a 100644
--- a/include/aidge/operator/Producer.hpp
+++ b/include/aidge/operator/Producer.hpp
@@ -86,7 +86,7 @@ public:
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Producer operator takes no input.");
     }
 
-    void computeOutputDims() noexcept override final {}
+    bool computeOutputDims(bool /*allowDataDependency*/ = false) override final { return true; }
 
     inline bool outputDimsForwarded() const noexcept override final { return true; }
 
diff --git a/include/aidge/operator/ReduceMean.hpp b/include/aidge/operator/ReduceMean.hpp
index ab27e4e02..25fba5e79 100644
--- a/include/aidge/operator/ReduceMean.hpp
+++ b/include/aidge/operator/ReduceMean.hpp
@@ -69,7 +69,7 @@ class ReduceMean_Op : public OperatorTensor,
         return std::make_shared<ReduceMean_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool computeOutputDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override final;
 
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index 060029bb8..8f1482019 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -67,7 +67,7 @@ public:
         return std::make_shared<Reshape_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool computeOutputDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
 
diff --git a/include/aidge/operator/Slice.hpp b/include/aidge/operator/Slice.hpp
index f68aa17f4..69278c59b 100644
--- a/include/aidge/operator/Slice.hpp
+++ b/include/aidge/operator/Slice.hpp
@@ -69,7 +69,7 @@ public:
      */
     std::shared_ptr<Operator> clone() const override { return std::make_shared<Slice_Op>(*this); }
 
-    void computeOutputDims() override final;
+    bool computeOutputDims(bool allowDataDependency = false) override final;
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
         SET_IMPL_MACRO(Slice_Op, *this, name);
diff --git a/include/aidge/operator/Sub.hpp b/include/aidge/operator/Sub.hpp
index fbcebcc9f..6969a6d83 100644
--- a/include/aidge/operator/Sub.hpp
+++ b/include/aidge/operator/Sub.hpp
@@ -57,7 +57,7 @@ public:
         return std::make_shared<Sub_Op>(*this);
     }
 
-    void computeOutputDims() override final;
+    bool computeOutputDims(bool allowDataDependency = false) override final;
 
 
     void setBackend(const std::string& name, DeviceIdx_t device = 0) override final;
diff --git a/include/aidge/operator/Transpose.hpp b/include/aidge/operator/Transpose.hpp
index 1beb5781b..5bebd6056 100644
--- a/include/aidge/operator/Transpose.hpp
+++ b/include/aidge/operator/Transpose.hpp
@@ -71,7 +71,7 @@ class Transpose_Op : public OperatorTensor,
         return std::make_shared<Transpose_Op<DIM>>(*this);
     }
 
-    void computeOutputDims() override final {
+    bool computeOutputDims(bool /*allowDataDependency*/ = false) override final {
         if (!getInput(0)->empty()) {
             auto attr = (this)->getStaticAttributes();
             const std::array<DimSize_t, DIM>& outDimsOrder = static_cast<const std::array<DimSize_t, DIM>&>(std::get<0>(attr));
@@ -80,7 +80,9 @@ class Transpose_Op : public OperatorTensor,
                 outputDims.push_back(getInput(0)->dims()[outDimsOrder[i]]);
             }
             mOutputs[0]->resize(outputDims);
+            return true;
         }
+        return false;
     }
 
     void setBackend(const std::string &name, DeviceIdx_t device = 0) override {
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 953ec981e..04248796b 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -117,7 +117,7 @@ void init_GraphView(py::module& m) {
 
           .def("get_nodes", &GraphView::getNodes)
           .def("get_node", &GraphView::getNode, py::arg("node_name"))
-          .def("forward_dims", &GraphView::forwardDims, py::arg("dims")=std::vector<std::vector<DimSize_t>>())
+          .def("forward_dims", &GraphView::forwardDims, py::arg("dims")=std::vector<std::vector<DimSize_t>>(), py::arg("allow_data_dependency") = false)
           .def("compile", &GraphView::compile, py::arg("backend"), py::arg("datatype"), py::arg("device") = 0, py::arg("dims")=std::vector<std::vector<DimSize_t>>())
           .def("__call__", &GraphView::operator(), py::arg("connectors"))
           .def("set_datatype", &GraphView::setDataType, py::arg("datatype"))
diff --git a/python_binding/operator/pybind_OperatorTensor.cpp b/python_binding/operator/pybind_OperatorTensor.cpp
index 4cd730649..301963da2 100644
--- a/python_binding/operator/pybind_OperatorTensor.cpp
+++ b/python_binding/operator/pybind_OperatorTensor.cpp
@@ -26,7 +26,7 @@ void init_OperatorTensor(py::module& m){
 
     .def("set_output", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setOutput, py::arg("outputIdx"), py::arg("data"))
     .def("set_input", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setInput, py::arg("outputIdx"), py::arg("data"))
-    .def("compute_output_dims", &OperatorTensor::computeOutputDims)
+    .def("compute_output_dims", &OperatorTensor::computeOutputDims, py::arg("allow_data_dependency") = false)
     .def("output_dims_forwarded", &OperatorTensor::outputDimsForwarded)
     ;
 }
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index dcd7a06ef..9b53a9d82 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -391,7 +391,7 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
     forwardDims(dims);
 }
 
-void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims) {
+bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>> dims, bool allowDataDependency) {
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children informations
@@ -436,7 +436,7 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
               const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator());
               // Recompute everytime, even if it was already computed in a
               // previous call of forwardDims(), as the graph may have changed!
-              op->computeOutputDims();
+              op->computeOutputDims(allowDataDependency);
               if (!op->outputDimsForwarded()) {
                   nextList.insert(nodePtr);
               }
@@ -450,12 +450,16 @@ void Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
             std::transform(nextList.begin(), nextList.end(),
                 std::back_inserter(nodesName),
                 [](auto val){ return val->name() + " (" + val->type() + ")"; });
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Unable to forward dimensions (circular dependency and/or wrong dimensions?). Unable to compute output dims for nodes {}.", nodesName);
+
+            Log::warn("Unable to forward dimensions (circular dependency, wrong dimensions and/or data-dependent dimensions?): could not compute output dims for nodes {}.", nodesName);
+            return false;
         }
 
         listNodes.swap(nextList);
     }
     while (!listNodes.empty());
+
+    return listNodes.empty();
 }
 
 void Aidge::GraphView::setBackend(const std::string &backend, const DeviceIdx_t device) const {
diff --git a/src/operator/Add.cpp b/src/operator/Add.cpp
index 85bc4b7ae..9f9ad681c 100644
--- a/src/operator/Add.cpp
+++ b/src/operator/Add.cpp
@@ -32,7 +32,7 @@ Aidge::Add_Op::Add_Op(const Add_Op& op)
     }
 }
 
-void Aidge::Add_Op::computeOutputDims() {
+bool Aidge::Add_Op::computeOutputDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     bool associated = (nbInputs() > 0); // do not compute anything if no input
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
@@ -70,6 +70,8 @@ void Aidge::Add_Op::computeOutputDims() {
         }
         mOutputs[0]->resize(outDims);
     }
+
+    return associated;
 }
 
 void Aidge::Add_Op::setBackend(const std::string& name, DeviceIdx_t device) {
diff --git a/src/operator/Concat.cpp b/src/operator/Concat.cpp
index 7df5b6dbf..d2bfd17ba 100644
--- a/src/operator/Concat.cpp
+++ b/src/operator/Concat.cpp
@@ -20,7 +20,7 @@
 
 const std::string Aidge::Concat_Op::Type = "Concat";
 
-void Aidge::Concat_Op::computeOutputDims() {
+bool Aidge::Concat_Op::computeOutputDims(bool /*allowDataDependency*/) {
     // Every input is non-empty with the same number of dimensions
     bool associated = (getInput(0) != nullptr);
     associated &= !(getInput(0)->empty()) && (getAttr<ConcatAttr::Axis>() < getInput(0)->nbDims()); // do not compute anything if no input
@@ -49,6 +49,8 @@ void Aidge::Concat_Op::computeOutputDims() {
     if (associated) {
         getOutput(0)->resize(outputDims);
     }
+
+    return associated;
 }
 
 void Aidge::Concat_Op::setBackend(const std::string& name, DeviceIdx_t device) {
diff --git a/src/operator/Div.cpp b/src/operator/Div.cpp
index 5ffe5f08d..0c43d7a3a 100644
--- a/src/operator/Div.cpp
+++ b/src/operator/Div.cpp
@@ -22,7 +22,7 @@
 
 const std::string Aidge::Div_Op::Type = "Div";
 
-void Aidge::Div_Op::computeOutputDims() {
+bool Aidge::Div_Op::computeOutputDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0) || !getInput(1)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
@@ -50,7 +50,10 @@ void Aidge::Div_Op::computeOutputDims() {
             --low_id;
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index 9865d64f6..acb1896ff 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -36,7 +36,7 @@ void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::sh
         mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()});
 }
 
-void Aidge::FC_Op::computeOutputDims() {
+bool Aidge::FC_Op::computeOutputDims(bool /*allowDataDependency*/) {
     bool associated = true;
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
         if (!getInput(i)) {
@@ -48,6 +48,8 @@ void Aidge::FC_Op::computeOutputDims() {
         // <batch, OutChannels>
         mOutputs[0]->resize({getInput(0)->dims()[0], this->template getAttr<FCAttr::OutChannels>()});
     }
+
+    return associated;
 }
 
 void Aidge::FC_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index 259e65139..082df8473 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -23,7 +23,7 @@
 
 const std::string Aidge::Gather_Op::Type = "Gather";
 
-void Aidge::Gather_Op::computeOutputDims() {
+bool Aidge::Gather_Op::computeOutputDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
@@ -46,7 +46,10 @@ void Aidge::Gather_Op::computeOutputDims() {
         }
 
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Gather_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/GenericOperator.cpp b/src/operator/GenericOperator.cpp
index 3eae49b69..0472a67cb 100644
--- a/src/operator/GenericOperator.cpp
+++ b/src/operator/GenericOperator.cpp
@@ -25,7 +25,7 @@ const Aidge::GenericOperator_Op::ComputeDimsFunc Aidge::GenericOperator_Op::Inpu
     return [nbOutputs, inputIdx](const std::vector<std::vector<std::size_t>>& inputsDims) { return std::vector<std::vector<std::size_t>>(nbOutputs, inputsDims[inputIdx]); };
 }
 
-void Aidge::GenericOperator_Op::computeOutputDims() {
+bool Aidge::GenericOperator_Op::computeOutputDims(bool /*allowDataDependency*/) {
     if (mComputeOutputDims) {
         std::vector<std::vector<std::size_t>> inputsDims(nbInputs(), std::vector<std::size_t>());
         for (std::size_t i = 0; i < nbInputs(); ++i) {
@@ -39,9 +39,11 @@ void Aidge::GenericOperator_Op::computeOutputDims() {
         for (std::size_t i = 0; i < nbOutputs(); ++i) {
             mOutputs[i]->resize(outputsDims[i]);
         }
+        return true;
     }
     else {
-        AIDGE_ASSERT(false, "Cannot compute output dim of a GenericOperator");
+        Log::warn("GenericOperator: cannot compute output dims, no ComputeDimsFunc was provided.");
+        return false;
     }
 }
 
@@ -50,7 +52,7 @@ bool Aidge::GenericOperator_Op::outputDimsForwarded() const {
         return !(mOutputs[0]->empty());
     }
     else {
-        AIDGE_ASSERT(false, "GenericOperator cannot forward dims");
+        Log::notice("GenericOperator: output dims not forwarded, no ComputeDimsFunc was provided.");
         return false;
     }
-}
\ No newline at end of file
+}
diff --git a/src/operator/GlobalAveragePooling.cpp b/src/operator/GlobalAveragePooling.cpp
index 618ccc06f..a851faee8 100644
--- a/src/operator/GlobalAveragePooling.cpp
+++ b/src/operator/GlobalAveragePooling.cpp
@@ -21,18 +21,13 @@
 
 const std::string Aidge::GlobalAveragePooling_Op::Type = "GlobalAveragePooling";
 
-void Aidge::GlobalAveragePooling_Op::computeOutputDims() {
+bool Aidge::GlobalAveragePooling_Op::computeOutputDims(bool /*allowDataDependency*/) {
   // error checking
   if (!getInput(0)) {
     AIDGE_THROW_OR_ABORT(std::runtime_error,
                          "GlobalAveragePooling : The input was not connected");
   }
-  // necessary bc forward dims sometimes passes with an empty vector before
-  // doing another pass
-  else if (getInput(0)->empty()) {
-    return;
-  // computation
-  } else {
+  else if (!getInput(0)->empty()) {
     AIDGE_ASSERT(getInput(0)->dims().size() >= 3,
                  "GlobalAveragePooling :  needs at least a 3 dimensions input, "
                  "number of input dim : {}",
@@ -43,7 +38,10 @@ void Aidge::GlobalAveragePooling_Op::computeOutputDims() {
     const std::vector<DimSize_t> out_dims{getInput(0)->dims().at(0),
                                           getInput(0)->dims().at(1)};
     mOutputs[0]->resize(out_dims);
+    return true;
   }
+
+  return false;
 }
 
 void Aidge::GlobalAveragePooling_Op::setBackend(const std::string &name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/MatMul.cpp b/src/operator/MatMul.cpp
index 568998753..223aeb93c 100644
--- a/src/operator/MatMul.cpp
+++ b/src/operator/MatMul.cpp
@@ -20,13 +20,14 @@
 
 const std::string Aidge::MatMul_Op::Type = "MatMul";
 
-void Aidge::MatMul_Op::computeOutputDims() {
+bool Aidge::MatMul_Op::computeOutputDims(bool /*allowDataDependency*/) {
     if (!getInput(0) || !getInput(1)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Missing input. Cannot compute output dimensions for MatMul Operator.");
     }
     if (getInput(0)->empty() && getInput(1)->empty()) {
         // both inputs are scalar
         mOutputs[0]->resize({});
+        return true;
     }
     else if (!getInput(0)->empty() && !getInput(1)->empty())
     {
@@ -69,7 +70,10 @@ void Aidge::MatMul_Op::computeOutputDims() {
             outDims.push_back(dims1[dims_size-1]);
 
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::MatMul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Memorize.cpp b/src/operator/Memorize.cpp
index 6e54a234d..3490a5f6d 100644
--- a/src/operator/Memorize.cpp
+++ b/src/operator/Memorize.cpp
@@ -22,7 +22,7 @@
 
 const std::string Aidge::Memorize_Op::Type = "Memorize";
 
-void Aidge::Memorize_Op::computeOutputDims() {
+bool Aidge::Memorize_Op::computeOutputDims(bool /*allowDataDependency*/) {
     for (size_t i = 0; i < 2; ++i) {
         if (!getInput(i)) {
             AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #{} should be associated with a Tensor", type(), i);
@@ -34,11 +34,15 @@ void Aidge::Memorize_Op::computeOutputDims() {
     if (!(getInput(0)->empty())) {
         const auto expectedDims =  getInput(0)->dims();
         mOutputs[0]->resize(expectedDims);
+        return true;
     }
     else if (!(getInput(1)->empty())) {
         const auto expectedDims =  getInput(1)->dims();
         mOutputs[0]->resize(expectedDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Memorize_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Mul.cpp b/src/operator/Mul.cpp
index 89bef9e0e..253c1ba2f 100644
--- a/src/operator/Mul.cpp
+++ b/src/operator/Mul.cpp
@@ -23,7 +23,7 @@
 
 const std::string Aidge::Mul_Op::Type = "Mul";
 
-void Aidge::Mul_Op::computeOutputDims() {
+bool Aidge::Mul_Op::computeOutputDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0) || !getInput(1)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
@@ -51,10 +51,13 @@ void Aidge::Mul_Op::computeOutputDims() {
             --low_id;
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
     else if (!getInput(0)->empty() && !getInput(1)->empty()) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Incompatible input dimensions for Operator Mul: {} and {}", getInput(0)->dims(), getInput(1)->dims());
     }
+
+    return false;
 }
 
 void Aidge::Mul_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index b85c18040..8390ee406 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -131,7 +131,7 @@ std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_
     return std::vector<std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>>(nbData(),std::pair<std::vector<Aidge::DimSize_t>, std::vector<Aidge::DimSize_t>>(firstEltDims, outputDims));
 }
 
-void Aidge::OperatorTensor::computeOutputDims() {
+bool Aidge::OperatorTensor::computeOutputDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     bool associated = (nbInputs() > 0); // do not compute anything if no input
     for (IOIndex_t i = 0; i < nbInputs(); ++i) {
@@ -151,6 +151,8 @@ void Aidge::OperatorTensor::computeOutputDims() {
         }
         mOutputs[0]->resize(expectedDims);
     }
+
+    return associated;
 }
 
 bool Aidge::OperatorTensor::outputDimsForwarded() const {
@@ -176,4 +178,12 @@ void Aidge::OperatorTensor::setDataType(const DataType& dataType) const {
         AIDGE_ASSERT(getInput(i) != nullptr, "Missing input#{} for operator {}", i, type());
         getInput(i)->setDataType(dataType);
     }
-}
\ No newline at end of file
+}
+
+void Aidge::OperatorTensor::forward() {
+    if (!outputDimsForwarded()) {
+        computeOutputDims();
+    }
+
+    Operator::forward();
+}
diff --git a/src/operator/Pop.cpp b/src/operator/Pop.cpp
index 06999e301..9e7b36025 100644
--- a/src/operator/Pop.cpp
+++ b/src/operator/Pop.cpp
@@ -23,7 +23,7 @@
 
 const std::string Aidge::Pop_Op::Type = "Pop";
 
-void Aidge::Pop_Op::computeOutputDims() {
+bool Aidge::Pop_Op::computeOutputDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
@@ -32,7 +32,10 @@ void Aidge::Pop_Op::computeOutputDims() {
         auto inputDims = getInput(0)->dims();
         inputDims.erase(inputDims.begin());
         getOutput(0)->resize(inputDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Pop_Op::updateConsummerProducer() {
diff --git a/src/operator/Pow.cpp b/src/operator/Pow.cpp
index 72a04de04..32194498b 100644
--- a/src/operator/Pow.cpp
+++ b/src/operator/Pow.cpp
@@ -22,7 +22,7 @@
 
 const std::string Aidge::Pow_Op::Type = "Pow";
 
-void Aidge::Pow_Op::computeOutputDims() {
+bool Aidge::Pow_Op::computeOutputDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0) || !getInput(1)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
@@ -50,7 +50,10 @@ void Aidge::Pow_Op::computeOutputDims() {
             --low_id;
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Pow_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/ReduceMean.cpp b/src/operator/ReduceMean.cpp
index 0de676e22..f00ea98a9 100644
--- a/src/operator/ReduceMean.cpp
+++ b/src/operator/ReduceMean.cpp
@@ -26,34 +26,35 @@
 
 const std::string Aidge::ReduceMean_Op::Type = "ReduceMean";
 
-void Aidge::ReduceMean_Op::computeOutputDims() {
-        if (!getInput(0)) {
-            AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+bool Aidge::ReduceMean_Op::computeOutputDims(bool /*allowDataDependency*/) {
+    if (!getInput(0)) {
+        AIDGE_THROW_OR_ABORT(std::runtime_error, "Every input should be associated with a Tensor");
+    }
+    if (!getInput(0)->empty()) {
+        // make Axes attribute positive
+        std::vector<std::int32_t>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
+        std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
+            if (val < 0)
+                val+=static_cast<std::int32_t>(getInput(0)->nbDims());
+        });
+        std::sort(axes.begin(), axes.end());
+
+        // build output dimensions
+        std::vector<DimSize_t> outDims = getInput(0)->dims();
+        if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
+            std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
         }
-        if (!getInput(0)->empty()) {
-            // make Axes attribute positive
-            std::vector<std::int32_t>& axes = this->template getAttr<ReduceMeanAttr::Axes>();
-            std::for_each(axes.begin(), axes.end(), [&] (std::int32_t& val) {
-                if (val < 0)
-                    val+=static_cast<std::int32_t>(getInput(0)->nbDims());
-            });
-            std::sort(axes.begin(), axes.end());
-
-            // build output dimensions
-            std::vector<DimSize_t> outDims = getInput(0)->dims();
-            if (this->template getAttr<ReduceMeanAttr::KeepDims>()) {
-                std::for_each(axes.cbegin(), axes.cend(), [&outDims] (const std::int32_t& val) { outDims[val] = 1; });
-            }
-            else {
-                for (auto it = axes.crbegin(); it != axes.crend(); ++it)
-                    outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
-            }
-
-            // TODO: change {1} for {} when scalar Tensors are better handled.
-            mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
-
+        else {
+            for (auto it = axes.crbegin(); it != axes.crend(); ++it)
+                outDims.erase(outDims.begin() + static_cast<std::size_t>(*it));
         }
+
+        // TODO: change {1} for {} when scalar Tensors are better handled.
+        mOutputs[0]->resize((outDims.size()>0) ? outDims : std::vector<DimSize_t>({1}));
+        return true;
     }
+    return false;
+}
 
 void Aidge::ReduceMean_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     SET_IMPL_MACRO(ReduceMean_Op, *this, name);
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index 79cfc0659..4ae7b1217 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -25,7 +25,7 @@
 
 const std::string Aidge::Reshape_Op::Type = "Reshape";
 
-void Aidge::Reshape_Op::computeOutputDims() {
+bool Aidge::Reshape_Op::computeOutputDims(bool /*allowDataDependency*/) {
     // check input has been associated
     if (!getInput(0)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Input was not connected");
@@ -58,7 +58,10 @@ void Aidge::Reshape_Op::computeOutputDims() {
         }
 
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Reshape_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
diff --git a/src/operator/Slice.cpp b/src/operator/Slice.cpp
index 6d2670695..161f1d336 100644
--- a/src/operator/Slice.cpp
+++ b/src/operator/Slice.cpp
@@ -24,7 +24,7 @@
 
 const std::string Aidge::Slice_Op::Type = "Slice";
 
-void Aidge::Slice_Op::computeOutputDims() {
+bool Aidge::Slice_Op::computeOutputDims(bool /*allowDataDependency*/) {
     // check input have been associated
     if (!getInput(0) || (getInput(0)->empty())) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "{}: input #0 should be associated with a Tensor", type());
@@ -50,4 +50,5 @@ void Aidge::Slice_Op::computeOutputDims() {
         outDims[axis] = sliceLength;
     }
     mOutputs[0]->resize(outDims);
+    return true;
 }
diff --git a/src/operator/Sub.cpp b/src/operator/Sub.cpp
index 0c12e6a1f..82b99b876 100644
--- a/src/operator/Sub.cpp
+++ b/src/operator/Sub.cpp
@@ -24,7 +24,7 @@
 
 const std::string Aidge::Sub_Op::Type = "Sub";
 
-void Aidge::Sub_Op::computeOutputDims() {
+bool Aidge::Sub_Op::computeOutputDims(bool /*allowDataDependency*/) {
     // check inputs have been associated
     if (!getInput(0) || !getInput(1)) {
         AIDGE_THROW_OR_ABORT(std::runtime_error, "At least one input was not connected");
@@ -52,7 +52,10 @@ void Aidge::Sub_Op::computeOutputDims() {
             --low_id;
         }
         mOutputs[0]->resize(outDims);
+        return true;
     }
+
+    return false;
 }
 
 void Aidge::Sub_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
-- 
GitLab