From 0437b28a99bef77743a4d84c08dff993893b0dab Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Mon, 10 Mar 2025 21:28:33 +0000
Subject: [PATCH 01/13] Add first version of forwardDType.

---
 include/aidge/graph/GraphView.hpp         |  15 ++
 include/aidge/operator/Gather.hpp         |   6 +
 include/aidge/operator/OperatorTensor.hpp |  12 +-
 include/aidge/operator/Reshape.hpp        |   5 +
 include/aidge/operator/Shape.hpp          |   6 +
 include/aidge/operator/Unsqueeze.hpp      |   6 +
 python_binding/graph/pybind_GraphView.cpp |   3 +-
 src/graph/GraphView.cpp                   | 168 ++++++++++++++++++----
 src/operator/Gather.cpp                   |   9 +-
 src/operator/OperatorTensor.cpp           |  30 ++++
 src/operator/Reshape.cpp                  |   8 ++
 src/operator/Shape.cpp                    |   5 +-
 src/operator/Unsqueeze.cpp                |  11 ++
 13 files changed, 254 insertions(+), 30 deletions(-)

diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index c6e3322ae..37ddb382d 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -295,6 +295,8 @@ public:
      */
     bool forwardDims(const std::vector<std::vector<DimSize_t>>& dims = {}, bool allowDataDependency = false);
 
+    bool forwardDType(const std::vector<DataType>& inputTypes = {});
+
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const;
     /** @brief Set the same data type for each Operator of the GraphView object's Nodes. */
@@ -613,6 +615,19 @@ private:
      */
     void updateInputsOutputsDelete(NodePtr deletedNode);
 
+    /**
+     * @brief Validates the connectivity and tensor integrity of the graph.
+     *
+     * This function ensures that all nodes in the graph are correctly connected
+     * and that mandatory input tensors are properly defined. It verifies:
+     * - That each node's input matches the expected output from its connected node.
+     * - That all mandatory inputs are present and defined.
+     * - Logs an error and returns `false` if any inconsistency is detected.
+     *
+     * @return `true` if all connections and tensor states are valid, `false` otherwise.
+     */
+    bool connectionValid();
+
     ///////////////////////////////////////////////////////
     //        TOPOLOGY
     ///////////////////////////////////////////////////////
diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 8bd8239ec..4ce9f7a49 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -111,6 +111,12 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
+    /**
+     * @brief Forward the data type.
+     * @return True if successful, false otherwise.
+     */
+    bool forwardDType() override final;
+
     /**
      * @brief Set the backend for the operator.
      * @param name The name of the backend.
diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp
index c24d3ba21..3ba37cbbb 100644
--- a/include/aidge/operator/OperatorTensor.hpp
+++ b/include/aidge/operator/OperatorTensor.hpp
@@ -172,6 +172,16 @@ public:
      */
     virtual bool forwardDims(bool allowDataDependency = false);
 
+    /**
+     * @brief Computes the data type of the operator's output tensors from its input data types.
+     *
+     * The default implementation expects all data inputs to share the same data type
+     * and forwards it to every output; (Optional)Param inputs may differ and are ignored.
+     *
+     * @return True if data types are successfully computed, false otherwise.
+     */
+    virtual bool forwardDType();
+
     /**
      * @brief Checks if dimensions have been successfully forwarded.
      * @return True if dimensions are forwarded, false otherwise.
@@ -189,7 +199,7 @@ public:
 
     /**
      * @brief Sets the data type of the operator's tensors.
-     * @warning Sets all outputs but only inputs of category 
+     * @warning Sets all outputs but only inputs of category
      * @code InputCategory::Param @endcode & @code InputCategory::OptionnalParam @endcode
      * @param dataType Data type to set.
      */
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index c93ef09c9..f8bfaf73b 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -120,6 +120,11 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
+    /**
+     * @brief Forward the data type.
+     * @return True if successful, false otherwise.
+     */
+    bool forwardDType() override final;
     /**
      * @brief Set the backend for the Reshape operator.
      * @param[in] name Name of the backend.
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 4028c4041..3d5d02f91 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -108,6 +108,12 @@ public:
      */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
+    /**
+     * @brief Forward the data type.
+     * @return True if successful, false otherwise.
+     */
+    bool forwardDType() override final;
+
     /**
      * @brief Set the backend for the Shape operator.
      * @param[in] name Name of the backend.
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index 27b3851fc..b8b367090 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -105,6 +105,12 @@ public:
    * @brief Compute dimensions for the output Tensor
    */
   bool forwardDims(bool allowDataDependency = false) override final;
+  /**
+   * @brief Forward the data type.
+   * @return True if successful, false otherwise.
+   */
+  bool forwardDType() override final;
+
   bool dimsForwarded() const override final;
 
   void setBackend(const std::string &name,
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index abb1a9eca..1d1778c31 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -128,6 +128,7 @@ void init_GraphView(py::module& m) {
           .def("clone", &GraphView::clone)
           .def("get_nodes", &GraphView::getNodes)
           .def("get_node", &GraphView::getNode, py::arg("node_name"))
+          .def("forward_dtype", &GraphView::forwardDType, py::arg("dtypes") = std::vector<DataType>())
           .def("forward_dims", &GraphView::forwardDims, py::arg("dims")=std::vector<std::vector<DimSize_t>>(), py::arg("allow_data_dependency") = false,
           R"mydelimiter(
             Compute and propagate Tensor dimensions through the GraphView.
@@ -209,7 +210,7 @@ void init_GraphView(py::module& m) {
               :param dims: input dimension to forward
               :type dims: List[List[Int]]
 
-               
+
                )mydelimiter")
           .def("__call__", &GraphView::operator(), py::arg("connectors"))
           .def("set_datatype", &GraphView::setDataType, py::arg("datatype"))
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 07fb764b4..d28d48dd3 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -451,6 +451,147 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
     forwardDims(dims);
 }
 
+bool Aidge::GraphView::connectionValid(){
+    // Ensure every node in the graph is correctly connected
+    Log::debug("Verifying graph connections and tensor validity");
+    for (std::shared_ptr<Node> nodePtr : getNodes()) {
+        for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
+            std::pair<std::shared_ptr<Node>, IOIndex_t> inputI = nodePtr->input(i);
+            if (inputI.first) {
+                if (nodePtr->getOperator()->getRawInput(i) != inputI.first->getOperator()->getRawOutput(inputI.second)) {
+                    Log::error("Connection mismatch: Input#{} of node [\033[1m\033[3m{}\033[0m (\033[1m\033[3m{}\033[0m)] -> Output#{} of node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]",
+                        i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type());
+                    return false;
+                }
+            } else if (nodePtr->inputCategory(i) != InputCategory::OptionalData &&
+                    nodePtr->inputCategory(i) != InputCategory::OptionalParam) {
+                if (!nodePtr->getOperator()->getRawInput(i)) {
+                    Log::error("Missing mandatory input#{} for node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]",
+                        i, nodePtr->name(), nodePtr->type());
+                    return false;
+                }
+                if (std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->undefined()) {
+                    Log::error("Undefined mandatory input#{} for node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]",
+                        i, nodePtr->name(), nodePtr->type());
+                    return false;
+                }
+            }
+        }
+    }
+    return true;
+}
+
+bool Aidge::GraphView::forwardDType(const std::vector<Aidge::DataType>& inputTypes){
+    if (!inputTypes.empty()){
+        auto msg = fmt::format("Manually setting GraphView input data type with provided parameters:");
+        for (std::size_t i = 0; i< inputTypes.size(); ++i)
+            msg = fmt::format("{}\n\t* input#{} {}", msg, i, inputTypes[i]);
+        Log::info("{}", msg);
+
+        Log::debug("Validating input dtype against existing graph inputs");
+        std::size_t i = 0;
+        for (auto& input : mInputNodes) {
+            const auto& currentTensorPtr =
+                std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator())->getInput(input.second);
+            if (i < inputTypes.size()) {
+                if (!currentTensorPtr) { // tensor detected
+                    Log::debug("Creating new tensor for input#{} with dtype {}", i, inputTypes[i]);
+                    auto tensor = std::make_shared<Tensor>(inputTypes[i], DataFormat::Default);
+                    input.first->getOperator()->setInput(input.second, tensor);
+                }
+            }
+            else {
+                const bool optional = (input.first->inputCategory(input.second) == InputCategory::OptionalData
+                    || input.first->inputCategory(input.second) == InputCategory::OptionalParam);
+
+                if (currentTensorPtr) {
+                    Log::debug("Using existing data type {} for graph input#{} (matching input#{} of node [\033[1m\033[3m{}\033[0m] - [\033[1m\033[3m{}\033[0m])",
+                            currentTensorPtr->dataType(), i, input.second, input.first->name(), input.first->type());
+                }
+                else if (!optional) {
+                    Log::warn("Missing data type for mandatory graph input#{} (matching input#{} of node [\033[1m\033[3m{}\033[0m] - [\033[1m\033[3m{}\033[0m])",
+                            i, input.second, input.first->name(), input.first->type());
+                }
+            }
+            ++i;
+        }
+    }
+    if(!connectionValid()) return false;
+    // INITIALIZING Open and Close sets
+    std::set<std::shared_ptr<Node>> close;               // Already treated nodes
+    std::set<std::shared_ptr<Node>> open = inputNodes(); // Nodes to treat
+    for (const auto& nodePtr : getNodes()) {
+        if (nodePtr->type() == Producer_Op::Type) {
+            // A Producer's dType is set by the user,
+            // so it is considered already treated
+            close.insert(nodePtr);
+            // Producer children are put in the open list
+            for (const auto& child : nodePtr->getChildren()) {
+                if (inView(child)) open.insert(child);
+            }
+        }
+    }
+    do{
+        std::set<std::shared_ptr<Node>> newOpen;
+        for (const auto& nodePtr : open) {
+            if (nodePtr->getOperator()->operatorType() != OperatorType::Tensor) {
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Cannot forward data type for node {} (of type {}): it is not an OperatorTensor. forwardDType is currently only supported for OperatorTensor.", nodePtr->name(), nodePtr->type());
+            }
+            const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator());
+            bool anyParent = false;
+            bool parentsForwarded = true;
+            for (const auto& parent : nodePtr->getParents()) {
+                if (parent != nullptr && inView(parent) && close.find(parent) == close.end()) {
+                    Log::debug("Data type not forwarded for parent (node {} (of type {})) of node {} (of type {})",
+                        parent->name(), parent->type(), nodePtr->name(), nodePtr->type());
+                    parentsForwarded = false;
+                }
+                else {
+                    anyParent = true;
+                }
+            }
+            // Special rule for Memorize_Op, which only requires one parent
+            // to have its dtype forwarded. This avoids circular dependency.
+            if (nodePtr->type() == Memorize_Op::Type && anyParent) {
+                parentsForwarded = true;
+            }
+            if (parentsForwarded && op->forwardDType()) {
+                Log::debug("Data type forwarded for node {} (of type {})",
+                    nodePtr->name(), nodePtr->type());
+
+                // Recompute every time, even if it was already computed in a
+                // previous call of forwardDims(), as the graph may have changed!
+                close.insert(nodePtr);
+                for (const auto& child : nodePtr->getChildren()) {
+                    if (inView(child) && close.find(child) == close.end()) {
+                        newOpen.insert(child);
+                    }
+                }
+            }
+            else {
+                if (parentsForwarded) {
+                    Log::debug("Unable to forward dimensions for node {} (of type {})", nodePtr->name(), nodePtr->type());
+                }
+                Log::debug("Adding back node {} (of type {}) to the list of nodes to forward data type", nodePtr->name(), nodePtr->type());
+                newOpen.insert(nodePtr);
+            }
+
+        }
+        if (newOpen == open) {
+            // We are stuck!
+            std::vector<std::string> nodesName;
+            std::transform(newOpen.begin(), newOpen.end(),
+                std::back_inserter(nodesName),
+                [](auto val){ return val->name() + " (" + val->type() + ")"; });
+
+            Log::warn("Unable to forward data type (circular dependency and/or wrong dimensions and/or data dependent dimension?). Unable to compute output data type for nodes {}.", nodesName);
+            return false;
+        }
+        open.swap(newOpen);
+    }while(!open.empty());
+    return open.empty();
+}
+
 bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>>& dims, bool allowDataDependency) {
     Log::debug("Starting dimension forward propagation for GraphView");
     // remove current Data connections and use dummy inputs to propagate dimensions
@@ -499,32 +640,7 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_
       }
     }
 
-    // Ensure every node in the graph is correctly connected
-    Log::debug("Verifying graph connections and tensor validity");
-    for (std::shared_ptr<Node> nodePtr : getNodes()) {
-        for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) {
-            std::pair<std::shared_ptr<Node>, IOIndex_t> inputI = nodePtr->input(i);
-            if (inputI.first) {
-                if (nodePtr->getOperator()->getRawInput(i) != inputI.first->getOperator()->getRawOutput(inputI.second)) {
-                    Log::error("Connection mismatch: Input#{} of node [\033[1m\033[3m{}\033[0m (\033[1m\033[3m{}\033[0m)] -> Output#{} of node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]",
-                        i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type());
-                    return false;
-                }
-            } else if (nodePtr->inputCategory(i) != InputCategory::OptionalData &&
-                    nodePtr->inputCategory(i) != InputCategory::OptionalParam) {
-                if (!nodePtr->getOperator()->getRawInput(i)) {
-                    Log::error("Missing mandatory input#{} for node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]",
-                        i, nodePtr->name(), nodePtr->type());
-                    return false;
-                }
-                if (std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->undefined()) {
-                    Log::error("Undefined mandatory input#{} for node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]",
-                        i, nodePtr->name(), nodePtr->type());
-                    return false;
-                }
-            }
-        }
-    }
+    if(!connectionValid()) return false;
 
     Log::debug("Initializing dimension propagation");
     // Establish initial list of dims forwardable nodes: graph input node + Producers childs
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index a4cb4aab0..ccef5ec53 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -59,7 +59,14 @@ bool Aidge::Gather_Op::dimsForwarded() const {
 
     return OperatorTensor::dimsForwarded();
 }
-
+bool Aidge::Gather_Op::forwardDType(){
+    if (inputsAssociated()) {
+        mOutputs[0]->setDataType(getInput(0)->dataType());
+        return true;
+    }
+    Log::notice("Gather_Op: No input associated, failed to forward data type.");
+    return false;
+}
 bool Aidge::Gather_Op::forwardDims(bool allowDataDependency) {
     if (inputsAssociated()) {
         // Copy optional input #1, if present, to attribute Indices
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index f907c5849..050d823ae 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -169,6 +169,36 @@ bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
+bool Aidge::OperatorTensor::forwardDType(){
+    Log::debug("Running default forwardDtype for operator {}",
+                    type());
+
+    if (inputsAssociated()) {
+        const auto expectedDType =  getInput(0)->dataType();
+        for (std::size_t i = 1; i < nbInputs(); ++i) {
+            if (inputCategory(i) == InputCategory::OptionalParam
+            || inputCategory(i) == InputCategory::Param){
+                // Param input can be different dtype than data input
+                continue;
+            }
+            if (expectedDType != getInput(i)->dataType()) {
+                Log::notice("{} operator's inputs should have the same datatype: expected {} (input #0), given {} (input #{})",
+                    type(), expectedDType, getInput(i)->dataType(), i);
+                return false;
+            }
+        }
+
+        for (std::size_t o = 0; o < nbOutputs(); ++o) {
+            Log::debug("Setting output#{} dtype to {}",
+                o, expectedDType);
+            mOutputs[o]->setDataType(expectedDType);
+        }
+        return true;
+    }
+
+    return false;
+}
+
 bool Aidge::OperatorTensor::dimsForwarded() const {
     bool forwarded = true;
     // check both inputs and outputs have been filled
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index b12fd486d..b4cd272a1 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -59,6 +59,14 @@ bool Aidge::Reshape_Op::dimsForwarded() const {
 
     return OperatorTensor::dimsForwarded();
 }
+bool Aidge::Reshape_Op::forwardDType(){
+    if (inputsAssociated()) {
+        mOutputs[0]->setDataType(getInput(0)->dataType());
+        return true;
+    }
+    Log::notice("Reshape_Op: No input associated, failed to forward data type.");
+    return false;
+}
 
 bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
     if (inputsAssociated()) {
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index 4db470473..4791a14a5 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -49,7 +49,10 @@ Aidge::Shape_Op::Shape_Op(const Aidge::Shape_Op& op)
 std::shared_ptr<Aidge::Operator> Aidge::Shape_Op::clone() const {
     return std::make_shared<Shape_Op>(*this);
 }
-
+bool Aidge::Shape_Op::forwardDType(){
+    mOutputs[0]->setDataType(DataType::Int64);
+    return true;
+}
 bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         if (this->start() < 0)
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
index 679b420ec..23d310bbe 100644
--- a/src/operator/Unsqueeze.cpp
+++ b/src/operator/Unsqueeze.cpp
@@ -55,6 +55,17 @@ bool Aidge::Unsqueeze_Op::dimsForwarded() const {
   return OperatorTensor::dimsForwarded();
 }
 
+bool Aidge::Unsqueeze_Op::forwardDType(){
+  if (inputsAssociated()) {
+    Log::debug("Unsqueeze_Op: setting output dtype to {}",
+      getInput(0)->dataType());
+      mOutputs[0]->setDataType(getInput(0)->dataType());
+      return true;
+  }
+  Log::notice("Unsqueeze_Op: No input associated, failed to forward data type.");
+  return false;
+}
+
 bool Unsqueeze_Op::forwardDims(bool allowDataDependency) {
   // error checking
   if (!inputsAssociated(true)) {
-- 
GitLab
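
Not part of the patch: a minimal usage sketch of the new `forward_dtype` binding, assuming the usual `aidge_core` factory helpers (`sequential`, `ReLU`, `Shape`) and lowercase `dtype` enum members. Note that the default per-operator rule only fully works on empty input tensors once later patches in this series (PATCH 06/07) relax the input checks.

```python
import aidge_core

# Hypothetical two-node graph: ReLU uses the default "same dtype as input #0"
# rule, while Shape_Op::forwardDType always stamps Int64 on its output.
graph = aidge_core.sequential([
    aidge_core.ReLU(name="relu"),
    aidge_core.Shape(name="shape"),
])

assert graph.forward_dtype([aidge_core.dtype.float32])

print(graph.get_node("relu").get_operator().get_output(0).dtype())   # dtype.float32
print(graph.get_node("shape").get_operator().get_output(0).dtype())  # dtype.int64
```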


From 9ab835a7fab3ffe7eb98535d4d10efdde325f140 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Sun, 23 Mar 2025 19:11:17 +0000
Subject: [PATCH 02/13] Add forwardDType to FC operator.

---
 include/aidge/operator/FC.hpp | 10 ++++++++--
 src/operator/FC.cpp           | 13 +++++++++++++
 2 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 393e640d6..39d2765c3 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -29,7 +29,7 @@ namespace Aidge {
  * @brief Description of a Fully Connected (FC) operation on an input Tensor.
  *
  * The Fully Connected (FC) operation applies a linear transformation to the input Tensor
- * by multiplying it with a weight matrix and optionally adding a bias vector: 
+ * by multiplying it with a weight matrix and optionally adding a bias vector:
  * - If `bias` is included:
  *   f(x) = x × weights^T + bias
  * - If `bias` is omitted:
@@ -74,7 +74,7 @@ public:
      *
      * Copies the attributes and output tensor(s) of the operator, but does not
      * copy input tensors. The new operator instance has no associated inputs.
-     * 
+     *
      * @param op The `FC_Op` instance to copy.
      */
     FC_Op(const FC_Op& op)
@@ -114,6 +114,12 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
+    /**
+     * @brief Forward the data type.
+     * @return True if successful, false otherwise.
+     */
+    bool forwardDType() override final;
+
     /**
      * @brief Sets the backend for the operator.
      *
diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp
index dd3ed7aba..07208b522 100644
--- a/src/operator/FC.cpp
+++ b/src/operator/FC.cpp
@@ -40,6 +40,19 @@ void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::sh
         mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()});
 }
 
+bool Aidge::FC_Op::forwardDType(){
+    // Current naive forwardDType based on the bias.
+    // Bias is optional, so this will not always work,
+    // but it is good enough for now.
+    // Feel free to upgrade the function!
+    if (getInput(2)) {
+        mOutputs[0]->setDataType(getInput(2)->dataType());
+        return true;
+    }
+    Log::notice("FC_Op: No bias associated, failed to forward data type.");
+    return false;
+}
+
 bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         // first check weight since it defines inChannels and outChannels
-- 
GitLab
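
Not part of the patch: a node-level sketch of the naive bias rule above, mirroring the run_node_test helper added later in this series (PATCH 13). The FC factory signature and the operator-level forward_dtype binding (added in PATCH 09) are assumptions.

```python
import aidge_core

# Hypothetical FC node: with the naive rule, the output dtype follows the bias
# (input #2), regardless of the data and weight dtypes.
fc = aidge_core.FC(32, 64, name="fc")  # assumed factory signature
op = fc.get_operator()

bias = aidge_core.Tensor()
bias.set_datatype(aidge_core.dtype.float32)
op.set_input(2, bias)

assert op.forward_dtype()        # binding added in PATCH 09
print(op.get_output(0).dtype())  # dtype.float32, taken from the bias
```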


From 5c58ede206a9079a89f2561bb8e16107f7763c23 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Tue, 1 Apr 2025 08:12:18 +0000
Subject: [PATCH 03/13] Add forwardDType to Clip operator.

---
 include/aidge/operator/Clip.hpp | 40 ++++++++++++++++++++++++++++--
 src/operator/Clip.cpp           | 43 ++++++++++-----------------------
 2 files changed, 51 insertions(+), 32 deletions(-)

diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp
index 886e74ea1..38ea7347a 100644
--- a/include/aidge/operator/Clip.hpp
+++ b/include/aidge/operator/Clip.hpp
@@ -17,6 +17,8 @@
 #include <limits>
 
 #include "aidge/backend/OperatorImpl.hpp"
+#include "aidge/data/Tensor.hpp"
+
 #include "aidge/graph/Node.hpp"
 #include "aidge/operator/OperatorTensor.hpp"
 #include "aidge/utils/Registrar.hpp"
@@ -117,6 +119,12 @@ public:
     bool dimsForwarded() const override final;
     bool forwardDims(bool allowDataDependency = false) override final;
 
+    /**
+     * @brief Forward the data type.
+     * @return True if successful, false otherwise.
+     */
+    bool forwardDType() override final;
+
     /**
      * @brief Setter to specify the backend to use.
      */
@@ -132,13 +140,41 @@ public:
      * @brief Getter for the minimum clipping value.
      * @return Reference to the minimum value.
      */
-    inline float& min() const noexcept { return mAttributes->getAttr<ClipAttr::Min>(); }
+    inline float& min() const noexcept {
+        if (getInput(1)){
+            if (getInput(1)->size() > 1)
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Expected Input#1 to be scalar (Tensors of empty shapes or of size one)");
+            std::shared_ptr<Tensor> fallback;
+            const auto& minTensor = mInputs[1]->refCastFrom(fallback, DataType::Float32, "cpu");
+            float minValue = *(static_cast<float*>(minTensor.getImpl()->hostPtr()));
+
+            if(mAttributes->getAttr<ClipAttr::Min>() != std::numeric_limits<float>::lowest() && mAttributes->getAttr<ClipAttr::Min>() != minValue)
+                Log::notice("{} : ignoring non-empty min attribute because input#1 take precedence", type());
+            mAttributes->getAttr<ClipAttr::Min>() = minValue;
+        }
+        return mAttributes->getAttr<ClipAttr::Min>();
+    }
 
     /**
      * @brief Getter for the maximum clipping value.
      * @return Reference to the maximum value.
      */
-    inline float& max() const noexcept { return mAttributes->getAttr<ClipAttr::Max>(); }
+    inline float& max() const noexcept {
+        if (getInput(2)){
+            if (getInput(2)->size() > 1)
+                AIDGE_THROW_OR_ABORT(std::runtime_error, "Expected Input#2 to be scalar (Tensors of empty shapes or of size one)");
+
+            std::shared_ptr<Tensor> fallback;
+            const auto& maxTensor = mInputs[2]->refCastFrom(fallback, DataType::Float32, "cpu");
+            float maxValue = *(static_cast<float*>(maxTensor.getImpl()->hostPtr()));
+
+            if(mAttributes->getAttr<ClipAttr::Max>() != std::numeric_limits<float>::max() && mAttributes->getAttr<ClipAttr::Max>() != maxValue)
+                Log::notice("{} : ignoring non-empty max attribute because input#2 take precedence", type());
+            mAttributes->getAttr<ClipAttr::Max>() = maxValue;
+
+        }
+        return mAttributes->getAttr<ClipAttr::Max>();
+    }
 
     std::set<std::string> getAvailableBackends() const override;
 
diff --git a/src/operator/Clip.cpp b/src/operator/Clip.cpp
index 336673e1e..979c04931 100644
--- a/src/operator/Clip.cpp
+++ b/src/operator/Clip.cpp
@@ -9,8 +9,6 @@
  *
  ********************************************************************************/
 
-#include "aidge/operator/Clip.hpp"
-
 #include <memory>
 #include <string>
 
@@ -50,41 +48,26 @@ bool Clip_Op::dimsForwarded() const {
 }
 
 
-bool Clip_Op::forwardDims(bool allowDataDependency)
+bool Aidge::Clip_Op::forwardDims(bool /*allowDataDependency*/)
 {
-    if (getInput(1) && getInput(1)->size() > 0)
-    {
-        std::shared_ptr<Tensor> fallback;
-        const auto& minTensor = mInputs[1]->refCastFrom(fallback, DataType::Float32, "cpu");
-        float minValue = *(static_cast<float*>(minTensor.getImpl()->hostPtr()));
-
-        if(this->min() != std::numeric_limits<float>::lowest() && this->min() != minValue)
-            Log::notice("{} : ignoring non-empty min attribute because input#1 take precedence", type());
-
-        this->min() = minValue;
-    }
-    if (getInput(2) && getInput(2)->size() > 0)
-    {
-        std::shared_ptr<Tensor> fallback;
-        const auto& maxTensor = mInputs[2]->refCastFrom(fallback, DataType::Float32, "cpu");
-        float maxValue = *(static_cast<float*>(maxTensor.getImpl()->hostPtr()));
-
-       if(this->max() != std::numeric_limits<float>::max() && this->max() != maxValue)
-            Log::notice("{} : ignoring non-empty max attribute because input#2 take precedence", type());
-
-        this->max() = maxValue;
-    }
-
-    if (!inputsAssociated(false)) 
+    if(!getInput(0))
         return false;
     else if ((getInput(1) && getInput(1)->size() > 1) || (getInput(2) && getInput(2)->size() > 1))
         AIDGE_THROW_OR_ABORT(std::runtime_error, "Expected Input#1 and Input#2 to be scalar (Tensors of empty shapes or of size one)");
-
     mOutputs[0]->resize(getInput(0)->dims());
-
     return true;
 }
-void Clip_Op::setBackend(const std::string& name, DeviceIdx_t device) {
+
+bool Aidge::Clip_Op::forwardDType(){
+    if (getInput(0)) {
+        mOutputs[0]->setDataType(getInput(0)->dataType());
+        return true;
+    }
+    Log::warn("Clip_Op: No Input#0 associated, failed to forward data type.");
+    return false;
+}
+
+void Aidge::Clip_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) {
     mImpl = Registrar<Clip_Op>::create(name)(*this);
     mOutputs[0]->setBackend(name, device);
 }
-- 
GitLab
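
Not part of the patch: a sketch of the Clip rule above. Only input #0 drives the output dtype, so the (float) min/max bound inputs do not force a cast. The Clip factory signature and the operator-level forward_dtype binding (PATCH 09) are assumptions.

```python
import aidge_core

clip = aidge_core.Clip(name="clip")  # assumed factory signature, default bounds
op = clip.get_operator()

x = aidge_core.Tensor()
x.set_datatype(aidge_core.dtype.int32)
op.set_input(0, x)

assert op.forward_dtype()        # binding added in PATCH 09
print(op.get_output(0).dtype())  # dtype.int32: min/max inputs are ignored here
```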


From 445ba9c12b397c90a2457df0b642b65d7a56a3b2 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Tue, 1 Apr 2025 08:14:02 +0000
Subject: [PATCH 04/13] Add forwardDType to Cast operator.

---
 include/aidge/operator/Cast.hpp | 6 ++++++
 src/operator/Cast.cpp           | 5 +++++
 2 files changed, 11 insertions(+)

diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp
index cd37e47d8..2adbcad33 100644
--- a/include/aidge/operator/Cast.hpp
+++ b/include/aidge/operator/Cast.hpp
@@ -98,6 +98,12 @@ public:
      */
     Cast_Op(const Cast_Op& op);
 
+    /**
+     * @brief Forward the data type.
+     * @return True if successful, false otherwise.
+     */
+    bool forwardDType() override final;
+
     /**
      * @brief Clone the operator using its copy constructor.
      * @return A shared pointer to the cloned operator.
diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp
index 128868dcd..587310c06 100644
--- a/src/operator/Cast.cpp
+++ b/src/operator/Cast.cpp
@@ -49,6 +49,11 @@ Cast_Op::Cast_Op(const Cast_Op& op)
     }
 }
 
+bool Aidge::Cast_Op::forwardDType(){
+    mOutputs[0]->setDataType(mAttributes->getAttr<CastAttr::TargetType>());
+    return true;
+}
+
 void Aidge::Cast_Op::setDataType(const DataType& dataType) const {
     if (targetType() != dataType) {
         Log::warn("Cast::setDataType(): Cannot setDataType for cast operator.");
-- 
GitLab
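
Not part of the patch: Cast needs no connected input at all to forward its dtype, since the TargetType attribute alone decides. A sketch, assuming the Cast factory takes the target dtype first and that the operator-level forward_dtype binding (PATCH 09) is available.

```python
import aidge_core

cast = aidge_core.Cast(aidge_core.dtype.float16, name="cast")  # assumed signature
op = cast.get_operator()

assert op.forward_dtype()        # no input required: TargetType decides
print(op.get_output(0).dtype())  # dtype.float16
```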


From 20336414e167749f78b00a2bf050a63cf4b96bfd Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Sun, 23 Mar 2025 19:13:00 +0000
Subject: [PATCH 05/13] Add forwardDType to MetaOperator operator.

---
 include/aidge/operator/MetaOperator.hpp | 55 ++++++++++++++++---------
 1 file changed, 35 insertions(+), 20 deletions(-)

diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp
index cbc9cc118..ccd1057d7 100644
--- a/include/aidge/operator/MetaOperator.hpp
+++ b/include/aidge/operator/MetaOperator.hpp
@@ -31,7 +31,7 @@ namespace Aidge {
 /**
  * @class MetaOperator_Op
  * @brief Represents a meta-operator, which is a composition of multiple operators.
- * 
+ *
  * A meta-operator encapsulates a micro-graph of operations, facilitating modularity
  * and reusability. It extends the functionality of `OperatorTensor` and provides
  * features such as cloning, dynamic input association, and custom backend support.
@@ -55,7 +55,7 @@ private:
 public:
     /**
      * @brief Constructor for MetaOperator_Op.
-     * 
+     *
      * @param type The type of the meta-operator.
      * @param graph The micro-graph defining the meta-operator.
      * @param forcedInputsCategory Optional input categories to override default behavior.
@@ -64,16 +64,16 @@ public:
 
     /**
      * @brief Copy constructor.
-     * 
+     *
      * Copies the operator's attributes and output tensors, but not its input tensors.
-     * 
+     *
      * @param op The operator to copy.
      */
     MetaOperator_Op(const MetaOperator_Op& op);
 
     /**
      * @brief Set the node for scheduling.
-     * 
+     *
      * @param node The node to be used as the upper node in the scheduling hierarchy.
      */
     inline void setUpperNode(std::shared_ptr<Node> node) {
@@ -82,16 +82,16 @@ public:
 
     /**
      * @brief Clone this meta-operator.
-     * 
+     *
      * Uses the copy constructor to create a new instance with identical attributes.
-     * 
+     *
      * @return A shared pointer to the cloned operator.
      */
     std::shared_ptr<Operator> clone() const override;
 
     /**
      * @brief Retrieve the micro-graph defining the meta-operator.
-     * 
+     *
      * @return A shared pointer to the micro-graph.
      */
     inline const std::shared_ptr<GraphView>& getMicroGraph() const noexcept {
@@ -100,7 +100,7 @@ public:
 
     /**
      * @brief Retrieve the scheduler for the micro-graph.
-     * 
+     *
      * @return A shared pointer to the scheduler.
      */
     inline const std::shared_ptr<SequentialScheduler>& getMicroGraphScheduler() const noexcept {
@@ -109,7 +109,7 @@ public:
 
     /**
      * @brief Associate an input tensor to the operator.
-     * 
+     *
      * @param inputIdx Index of the input tensor.
      * @param data Shared pointer to the data tensor.
      */
@@ -117,7 +117,7 @@ public:
 
     /**
      * @brief Set an input tensor for the operator.
-     * 
+     *
      * @param inputIdx Index of the input tensor.
      * @param data Shared pointer to the data tensor.
      */
@@ -131,7 +131,7 @@ public:
     
     /**
      * @brief Forward the dimensions through the micro-graph.
-     * 
+     *
      * @param allowDataDependency If true, allows data-dependent operations during forwarding.
      * @return True if the operation succeeded, false otherwise.
      */
@@ -143,16 +143,31 @@ public:
         return false;
     }
 
+    /**
+     * @brief Forward the data type through the micro-graph.
+     *
+     * @return True if the operation succeeded, false otherwise.
+     */
+    bool forwardDType() override final {
+        if (inputsAssociated(false)) {
+            // Forward data type through the micro-graph
+            return mGraph->forwardDType({});
+        } else {
+            Log::warn("MetaOperator: no input associated, cannot forward data type.");
+        }
+        return false;
+    }
+
     /**
      * @brief Retrieve the backend for the operator.
-     * 
+     *
      * @return The name of the backend.
      */
     std::string backend() const noexcept override;
 
     /**
      * @brief Set the backend for the operator.
-     * 
+     *
      * @param name The name of the backend.
      * @param device The device index.
      */
@@ -160,16 +175,16 @@ public:
 
     /**
      * @brief Get the available backends for the operator.
-     * 
+     *
      * @return A set of available backend names.
      */
     std::set<std::string> getAvailableBackends() const override;
 
     /**
      * @brief Set the data type for the operator.
-     * 
+     *
      * This propagates the data type change to the micro-graph.
-     * 
+     *
      * @param datatype The new data type.
      */
     void setDataType(const DataType &datatype) const override {
@@ -181,7 +196,7 @@ public:
 
     /**
      * @brief Retrieve the dynamic attributes of the operator.
-     * 
+     *
      * @return A shared pointer to the attributes.
      */
     inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; }
@@ -213,7 +228,7 @@ public:
     void backward() override;
     /**
      * @brief Check if the operator is atomic.
-     * 
+     *
      * @return False, as meta-operators are inherently non-atomic.
      */
     inline bool isAtomic() const noexcept override final { return false; }
@@ -222,7 +237,7 @@ public:
 
 /**
  * @brief Helper function to create a MetaOperator node.
- * 
+ *
  * @param type The type of the meta-operator.
  * @param graph The micro-graph defining the meta-operator.
  * @param forcedInputsCategory Optional input categories to override default behavior.
-- 
GitLab
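
Not part of the patch: a sketch of dtype propagation through a meta-operator's micro-graph, assuming the aidge_core.meta_operator helper with its usual (type, graph, name) signature and the operator-level forward_dtype binding (PATCH 09).

```python
import aidge_core

# Wrap a one-node micro-graph in a meta-operator; forwardDType delegates to
# GraphView::forwardDType on the micro-graph, whose output tensors the wrapper shares.
micro = aidge_core.sequential([aidge_core.ReLU(name="inner_relu")])
meta = aidge_core.meta_operator("MyMeta", micro, name="meta")  # assumed helper
op = meta.get_operator()

x = aidge_core.Tensor()
x.set_datatype(aidge_core.dtype.float32)
op.set_input(0, x)

assert op.forward_dtype()        # binding added in PATCH 09
print(op.get_output(0).dtype())  # dtype.float32, propagated through the micro-graph
```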


From 2e6e940aba362bc874eae6a077d4c177ea6b9dc0 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Sun, 23 Mar 2025 19:14:08 +0000
Subject: [PATCH 06/13] OperatorTensor::forwardDType no longer requires input
 tensors to be defined.

---
 src/operator/OperatorTensor.cpp | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index 050d823ae..e1b803c14 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -173,7 +173,7 @@ bool Aidge::OperatorTensor::forwardDType(){
     Log::debug("Running default forwardDtype for operator {}",
                     type());
 
-    if (inputsAssociated()) {
+    if (inputsAssociated(false)) {
         const auto expectedDType =  getInput(0)->dataType();
         for (std::size_t i = 1; i < nbInputs(); ++i) {
             if (inputCategory(i) == InputCategory::OptionalParam
@@ -182,7 +182,7 @@ bool Aidge::OperatorTensor::forwardDType(){
                 continue;
             }
             if (expectedDType != getInput(i)->dataType()) {
-                Log::notice("{} operator's inputs should have the same datatype: expected {} (input #0), given {} (input #{})",
+                Log::info("{} operator's inputs should have the same datatype: expected {} (input #0), given {} (input #{})",
                     type(), expectedDType, getInput(i)->dataType(), i);
                 return false;
             }
@@ -194,6 +194,8 @@ bool Aidge::OperatorTensor::forwardDType(){
             mOutputs[o]->setDataType(expectedDType);
         }
         return true;
+    } else {
+        Log::info("Inputs are not associated, failed to forward data type.");
     }
 
     return false;
-- 
GitLab
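
Not part of the patch: with this relaxation, an input tensor only needs a dtype, not data or dims, but mismatched data-input dtypes still make the default rule fail. A sketch, assuming the Add factory signature and the operator-level forward_dtype binding (PATCH 09).

```python
import aidge_core

add = aidge_core.Add(name="add")  # assumed factory signature
op = add.get_operator()

for idx, dt in [(0, aidge_core.dtype.float32), (1, aidge_core.dtype.int32)]:
    t = aidge_core.Tensor()  # undefined tensor: only the dtype is set
    t.set_datatype(dt)
    op.set_input(idx, t)

print(op.forward_dtype())  # False: input #1 dtype differs from input #0
```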


From c408a7a2fec64bb23f222da0918b703117804f4e Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Sun, 23 Mar 2025 19:15:25 +0000
Subject: [PATCH 07/13] Fix GraphView::forwardDType, error with undefined
 tensors.

---
 include/aidge/graph/GraphView.hpp         | 27 +++++++++++++++++++++--
 python_binding/graph/pybind_GraphView.cpp |  5 +++--
 src/graph/GraphView.cpp                   | 25 ++++++++++++++++-----
 3 files changed, 47 insertions(+), 10 deletions(-)

diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index 37ddb382d..081c429e8 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -295,8 +295,31 @@ public:
      */
     bool forwardDims(const std::vector<std::vector<DimSize_t>>& dims = {}, bool allowDataDependency = false);
 
+    /**
+     * @brief Helper function to compute and forward data types throughout the graph.
+     * It tries to infer each output data type from the input data types, relying
+     * on the ``OperatorTensor::forwardDType()`` method. A generic version of this
+     * method is defined in ``OperatorTensor`` and needs to be overridden to
+     * account for special cases.
+     *
+     * This method does not replace manually setting the data type of operators,
+     * but it should be preferred over ``GraphView::setDataType``.
+     *
+     * @param inputTypes A vector of data types, given in the same order as the
+     * inputs of the graph.
+     * @return true if the function succeeded in propagating data types throughout the graph.
+     */
     bool forwardDType(const std::vector<DataType>& inputTypes = {});
 
+
+    /**
+     * @brief Helper that calls ``bool forwardDType(const std::vector<DataType>& inputTypes = {})``.
+     *
+     * @param inputType Data type to set for, and forward from, each input of the graph.
+     * @return true if the function succeeded in propagating data types throughout the graph.
+     */
+    bool forwardDType(DataType inputType);
+
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const;
     /** @brief Set the same data type for each Operator of the GraphView object's Nodes. */
@@ -623,10 +646,10 @@ private:
      * - That each node's input matches the expected output from its connected node.
      * - That all mandatory inputs are present and defined.
      * - Logs an error and returns `false` if any inconsistency is detected.
-     *
+     * @param checkDefinedTensor if true, check that no input tensor is undefined.
      * @return `true` if all connections and tensor states are valid, `false` otherwise.
      */
-    bool connectionValid();
+    bool connectionValid(bool checkDefinedTensor = true);
 
     ///////////////////////////////////////////////////////
     //        TOPOLOGY
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index 1d1778c31..d1b99c305 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -80,7 +80,7 @@ void init_GraphView(py::module& m) {
           :param include_learnable_parameters: include non-data inputs, like weights and biases, default True.
           :type include_learnable_parameters: bool, optional
           )mydelimiter")
-
+          .def("insert_parent", &GraphView::insertParent, py::arg("child_node"), py::arg("new_parent_node"), py::arg("child_input_tensor_idx"), py::arg("new_parent_input_tensor_idx"), py::arg("new_parent_output_tensor_idx"))
           .def("add_child",
                (void (GraphView::*)(std::shared_ptr<Node>,
                                    std::shared_ptr<Node>,
@@ -128,7 +128,8 @@ void init_GraphView(py::module& m) {
           .def("clone", &GraphView::clone)
           .def("get_nodes", &GraphView::getNodes)
           .def("get_node", &GraphView::getNode, py::arg("node_name"))
-          .def("forward_dtype", &GraphView::forwardDType, py::arg("dtypes") = std::vector<DataType>())
+          .def("forward_dtype", (bool(GraphView::*)(const std::vector<DataType>&)) &GraphView::forwardDType, py::arg("dtypes") = std::vector<DataType>())
+          .def("forward_dtype", (bool(GraphView::*)(DataType)) &GraphView::forwardDType, py::arg("dtype"))
           .def("forward_dims", &GraphView::forwardDims, py::arg("dims")=std::vector<std::vector<DimSize_t>>(), py::arg("allow_data_dependency") = false,
           R"mydelimiter(
             Compute and propagate Tensor dimensions through the GraphView.
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index d28d48dd3..be9c19896 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -451,7 +451,7 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
     forwardDims(dims);
 }
 
-bool Aidge::GraphView::connectionValid(){
+bool Aidge::GraphView::connectionValid(bool checkDefinedTensor){
     // Ensure every node in the graph is correctly connected
     Log::debug("Verifying graph connections and tensor validity");
     for (std::shared_ptr<Node> nodePtr : getNodes()) {
@@ -470,7 +470,7 @@ bool Aidge::GraphView::connectionValid(){
                         i, nodePtr->name(), nodePtr->type());
                     return false;
                 }
-                if (std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->undefined()) {
+                if (checkDefinedTensor && std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->undefined()) {
                     Log::error("Undefined mandatory input#{} for node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]",
                         i, nodePtr->name(), nodePtr->type());
                     return false;
@@ -481,6 +481,10 @@ bool Aidge::GraphView::connectionValid(){
     return true;
 }
 
+bool Aidge::GraphView::forwardDType(DataType inputType){
+    return forwardDType(std::vector<DataType>(getNbDataInputs(), inputType));
+}
+
 bool Aidge::GraphView::forwardDType(const std::vector<Aidge::DataType>& inputTypes){
     if (!inputTypes.empty()){
         auto msg = fmt::format("Manually setting GraphView input data type with provided parameters:");
@@ -494,10 +498,12 @@ bool Aidge::GraphView::forwardDType(const std::vector<Aidge::DataType>& inputTyp
             const auto& currentTensorPtr =
                 std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator())->getInput(input.second);
             if (i < inputTypes.size()) {
-                if (!currentTensorPtr) { // tensor detected
+                if (!currentTensorPtr) {
                     Log::debug("Creating new tensor for input#{} with dtype {}", i, inputTypes[i]);
                     auto tensor = std::make_shared<Tensor>(inputTypes[i], DataFormat::Default);
                     input.first->getOperator()->setInput(input.second, tensor);
+                } else {
+                    currentTensorPtr->setDataType(inputTypes[i]);
                 }
             }
             else {
@@ -516,7 +522,9 @@ bool Aidge::GraphView::forwardDType(const std::vector<Aidge::DataType>& inputTyp
             ++i;
         }
     }
-    if(!connectionValid()) return false;
+
+    if(!connectionValid(false)) return false;
+
     // INITIALIZING Open and Close sets
     std::set<std::shared_ptr<Node>> close;               // Already treated nodes
     std::set<std::shared_ptr<Node>> open = inputNodes(); // Nodes to treat
@@ -532,6 +540,10 @@ bool Aidge::GraphView::forwardDType(const std::vector<Aidge::DataType>& inputTyp
         }
     }
     do{
+        Log::debug("List of node to forward data type:");
+        for(auto node : open){
+            Log::debug("\t- Node {} (of type {})", node->name(), node->type());
+        }
         std::set<std::shared_ptr<Node>> newOpen;
         for (const auto& nodePtr : open) {
             if (nodePtr->getOperator()->operatorType() != OperatorType::Tensor) {
@@ -560,7 +572,7 @@ bool Aidge::GraphView::forwardDType(const std::vector<Aidge::DataType>& inputTyp
                     nodePtr->name(), nodePtr->type());
 
                 // Recompute every time, even if it was already computed in a
-                // previous call of forwardDims(), as the graph may have changed!
+                // previous call of forwardDType(), as the graph may have changed!
                 close.insert(nodePtr);
                 for (const auto& child : nodePtr->getChildren()) {
                     if (inView(child) && close.find(child) == close.end()) {
@@ -570,7 +582,8 @@ bool Aidge::GraphView::forwardDType(const std::vector<Aidge::DataType>& inputTyp
             }
             else {
                 if (parentsForwarded) {
-                    Log::debug("Unable to forward dimensions for node {} (of type {})", nodePtr->name(), nodePtr->type());
+                    Log::error("Unable to forward data type for node {} (of type {})", nodePtr->name(), nodePtr->type());
+
                 }
                 Log::debug("Adding back node {} (of type {}) to the list of nodes to forward data type", nodePtr->name(), nodePtr->type());
                 newOpen.insert(nodePtr);
-- 
GitLab
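
Not part of the patch: a sketch of the new scalar overload, which tags every graph data input with the same dtype before propagating.

```python
import aidge_core

graph = aidge_core.sequential([aidge_core.ReLU(name="relu")])

# Equivalent to graph.forward_dtype([aidge_core.dtype.float16] * n_data_inputs)
assert graph.forward_dtype(aidge_core.dtype.float16)
print(graph.get_node("relu").get_operator().get_output(0).dtype())  # dtype.float16
```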


From 4a06636ad386cae951443e55f3297e81345dbebb Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Thu, 27 Mar 2025 13:38:15 +0000
Subject: [PATCH 08/13] Remove Gather::forwardDType().

---
 include/aidge/operator/Gather.hpp | 6 ------
 src/operator/Gather.cpp           | 9 +--------
 2 files changed, 1 insertion(+), 14 deletions(-)

diff --git a/include/aidge/operator/Gather.hpp b/include/aidge/operator/Gather.hpp
index 4ce9f7a49..8bd8239ec 100644
--- a/include/aidge/operator/Gather.hpp
+++ b/include/aidge/operator/Gather.hpp
@@ -111,12 +111,6 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
-    /**
-     * @brief Forward the data type.
-     * @return True if successful, false otherwise.
-     */
-    bool forwardDType() override final;
-
     /**
      * @brief Set the backend for the operator.
      * @param name The name of the backend.
diff --git a/src/operator/Gather.cpp b/src/operator/Gather.cpp
index ccef5ec53..a4cb4aab0 100644
--- a/src/operator/Gather.cpp
+++ b/src/operator/Gather.cpp
@@ -59,14 +59,7 @@ bool Aidge::Gather_Op::dimsForwarded() const {
 
     return OperatorTensor::dimsForwarded();
 }
-bool Aidge::Gather_Op::forwardDType(){
-    if (inputsAssociated()) {
-        mOutputs[0]->setDataType(getInput(0)->dataType());
-        return true;
-    }
-    Log::notice("Gather_Op: No input associated, failed to forward data type.");
-    return false;
-}
+
 bool Aidge::Gather_Op::forwardDims(bool allowDataDependency) {
     if (inputsAssociated()) {
         // Copy optional input #1, if present, to attribute Indices
-- 
GitLab


From da06710b62623ed9bf48888140937f451b68f77d Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Thu, 27 Mar 2025 13:43:51 +0000
Subject: [PATCH 09/13] Fix OperatorTensor::forwardDType() if OptionalData is
 not connected.

---
 python_binding/operator/pybind_OperatorTensor.cpp | 1 +
 src/operator/OperatorTensor.cpp                   | 6 ++++++
 2 files changed, 7 insertions(+)

diff --git a/python_binding/operator/pybind_OperatorTensor.cpp b/python_binding/operator/pybind_OperatorTensor.cpp
index 2602e115d..350c0958a 100644
--- a/python_binding/operator/pybind_OperatorTensor.cpp
+++ b/python_binding/operator/pybind_OperatorTensor.cpp
@@ -33,6 +33,7 @@ void init_OperatorTensor(py::module& m){
     .def("set_output", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&) const) &OperatorTensor::setOutput, py::arg("outputIdx"), py::arg("data"))
     .def("set_input", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setInput, py::arg("outputIdx"), py::arg("data"))
     .def("forward_dims", &OperatorTensor::forwardDims, py::arg("allow_data_dependency") = false)
+    .def("forward_dtype", &OperatorTensor::forwardDType)
     .def("dims_forwarded", &OperatorTensor::dimsForwarded)
     ;
 }
diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp
index e1b803c14..b7aa5e707 100644
--- a/src/operator/OperatorTensor.cpp
+++ b/src/operator/OperatorTensor.cpp
@@ -181,6 +181,12 @@ bool Aidge::OperatorTensor::forwardDType(){
                 // Param input can be different dtype than data input
                 continue;
             }
+            if (inputCategory(i) == InputCategory::OptionalData
+            && !getInput(i)){
+                // If OptionalData is not set, skip
+                continue;
+            }
+
             if (expectedDType != getInput(i)->dataType()) {
                 Log::info("{} operator's inputs should have the same datatype: expected {} (input #0), given {} (input #{})",
                     type(), expectedDType, getInput(i)->dataType(), i);
-- 
GitLab
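
Not part of the patch: the new pybind entry makes the per-operator rule directly testable from Python. A minimal sketch:

```python
import aidge_core

op = aidge_core.ReLU(name="relu").get_operator()

x = aidge_core.Tensor()
x.set_datatype(aidge_core.dtype.float32)
op.set_input(0, x)

assert op.forward_dtype()        # the binding added by this patch
print(op.get_output(0).dtype())  # dtype.float32
```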


From daf3e51b3151f207b0c0d679fc83002d2c1a7c60 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Thu, 27 Mar 2025 13:45:44 +0000
Subject: [PATCH 10/13] Update Reshape::forwardDType().

---
 include/aidge/operator/Reshape.hpp | 1 +
 src/operator/Reshape.cpp           | 5 ++++-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index f8bfaf73b..4d0e21e27 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -122,6 +122,7 @@ public:
 
     /**
      * @brief Forward the data type.
+     * Output datatype is the same as input 0.
      * @return True if successful, false otherwise.
      */
     bool forwardDType() override final;
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index b4cd272a1..3df66f293 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -60,7 +60,10 @@ bool Aidge::Reshape_Op::dimsForwarded() const {
     return OperatorTensor::dimsForwarded();
 }
 bool Aidge::Reshape_Op::forwardDType(){
-    if (inputsAssociated()) {
+    // Note: Override required because the shape input is OptionalData.
+    // The default implementation would fail when it is connected, since
+    // input[0] dtype != input[1] dtype (the shape is an integer tensor).
+    if (inputsAssociated(false)) {
         mOutputs[0]->setDataType(getInput(0)->dataType());
         return true;
     }
-- 
GitLab
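
Not part of the patch: a sketch of the case the note above describes. With the int64 shape input connected, the default rule would report a dtype mismatch, while the override simply forwards input #0's dtype. The Reshape factory defaults and the numpy Tensor constructor are assumptions.

```python
import aidge_core
import numpy as np

reshape = aidge_core.Reshape(name="reshape")  # assumed factory defaults
op = reshape.get_operator()

data = aidge_core.Tensor()
data.set_datatype(aidge_core.dtype.float32)
op.set_input(0, data)
op.set_input(1, aidge_core.Tensor(np.array([2, 8], dtype=np.int64)))  # shape

assert op.forward_dtype()
print(op.get_output(0).dtype())  # dtype.float32, despite the int64 shape input
```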


From 51f5600c1a6bcf17e43cec3a73479050b7b21ec2 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Thu, 27 Mar 2025 13:49:45 +0000
Subject: [PATCH 11/13] Remove Unsqueeze::forwardDType().

---
 include/aidge/operator/Unsqueeze.hpp |  5 -----
 src/operator/Unsqueeze.cpp           | 11 -----------
 2 files changed, 16 deletions(-)

diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index b8b367090..4a66c37b2 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -105,11 +105,6 @@ public:
    * @brief Compute dimensions for the output Tensor
    */
   bool forwardDims(bool allowDataDependency = false) override final;
-  /**
-   * @brief Forward the data type.
-   * @return True if successful, false otherwise.
-   */
-  bool forwardDType() override final;
 
   bool dimsForwarded() const override final;
 
diff --git a/src/operator/Unsqueeze.cpp b/src/operator/Unsqueeze.cpp
index 23d310bbe..679b420ec 100644
--- a/src/operator/Unsqueeze.cpp
+++ b/src/operator/Unsqueeze.cpp
@@ -55,17 +55,6 @@ bool Aidge::Unsqueeze_Op::dimsForwarded() const {
   return OperatorTensor::dimsForwarded();
 }
 
-bool Aidge::Unsqueeze_Op::forwardDType(){
-  if (inputsAssociated()) {
-    Log::debug("Unsqueeze_Op: setting output dtype to {}",
-      getInput(0)->dataType());
-      mOutputs[0]->setDataType(getInput(0)->dataType());
-      return true;
-  }
-  Log::notice("Unsqueeze_Op: No input associated, failed to forward data type.");
-  return false;
-}
-
 bool Unsqueeze_Op::forwardDims(bool allowDataDependency) {
   // error checking
   if (!inputsAssociated(true)) {
-- 
GitLab


From 7a17beb3e8971d97ad43f4d946fed8fc80cce1c4 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Thu, 27 Mar 2025 13:57:03 +0000
Subject: [PATCH 12/13] Update FC docstring.

---
 include/aidge/operator/FC.hpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp
index 39d2765c3..e513c3059 100644
--- a/include/aidge/operator/FC.hpp
+++ b/include/aidge/operator/FC.hpp
@@ -116,6 +116,7 @@ public:
 
     /**
      * @brief Forward the data type.
+     * The output is set to be the same type as the bias input.
      * @return True if successful, false otherwise.
      */
     bool forwardDType() override final;
-- 
GitLab


From 2f2535bb448d17589458c14d3b212879ded522d0 Mon Sep 17 00:00:00 2001
From: cmoineau <cyril.moineau@cea.fr>
Date: Thu, 27 Mar 2025 14:11:21 +0000
Subject: [PATCH 13/13] Add unit test for forwardDType function.

---
 aidge_core/unit_tests/test_forwardDType.py | 415 +++++++++++++++++++++
 1 file changed, 415 insertions(+)
 create mode 100644 aidge_core/unit_tests/test_forwardDType.py

diff --git a/aidge_core/unit_tests/test_forwardDType.py b/aidge_core/unit_tests/test_forwardDType.py
new file mode 100644
index 000000000..b08f29206
--- /dev/null
+++ b/aidge_core/unit_tests/test_forwardDType.py
@@ -0,0 +1,415 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+import numpy as np
+
+# List of all dtype defined by Aidge
+ALL_AIDGE_DTYPE = [i for i in aidge_core.dtype.__members__.values() if i != aidge_core.dtype.any]
+
+
+class test_forwardDType(unittest.TestCase):
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    ### HELPER FUNCTIONS ###
+    def verify_node_out_dtype(self, node, out_dtype):
+        """Helper function to verify output data type of a node
+        """
+        operator = node.get_operator()
+        self.assertEqual(operator.nb_outputs(), len(out_dtype), "Error in test design, the number of outputs provided does not correspond to the number of outputs of the operator.")
+        for out_idx in range(operator.nb_outputs()):
+            tensor_dtype = operator.get_output(out_idx).dtype()
+            self.assertEqual(tensor_dtype, out_dtype[out_idx], f"Node {node.name()}({node.type()}) output#{out_idx} is {tensor_dtype}, expected {out_dtype[out_idx]}")
+
+    def run_node_test(self, node, in_dtype, out_dtype):
+        """Run forwardDType unit test on the graph
+
+        :param graph: GraphView to call forwardDtype on
+        :type graph: aidge_core.GraphView
+        :param in_dtype: List of input type to forward
+        :type in_dtype: List[aidge_core.dtype]
+        :param out_dtype: List of expected output type
+        :type out_dtype: List[aidge_core.dtype]
+        """
+        op = node.get_operator()
+
+        for in_idx in range(len(in_dtype)):
+            in_tensor = aidge_core.Tensor()
+            in_tensor.set_datatype(in_dtype[in_idx])
+            op.set_input(in_idx, in_tensor)
+
+        self.assertTrue(op.forward_dtype(), "Forward data type failed")
+        self.verify_node_out_dtype(node, out_dtype)
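+        # Illustrative usage (the ReLU test below follows this pattern): a
+        # unary operator is expected to propagate its input dtype, e.g.
+        #   node = aidge_core.ReLU(name="relu")
+        #   self.run_node_test(node, [aidge_core.dtype.float32],
+        #                      [aidge_core.dtype.float32])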
+
+    def run_graph_test(self, graph, in_dtype, out_dtype):
+        """Run forwardDType unit test on the graph
+
+        :param graph: GraphView to call forwardDtype on
+        :type graph: aidge_core.GraphView
+        :param in_dtype: List of input type to forward
+        :type in_dtype: List[aidge_core.dtype]
+        :param out_dtype: Dictionary of node name and expected output type
+        :type out_dtype: Dict[str: List[aidge_core.dtype]]
+        """
+        self.assertTrue(graph.forward_dtype(in_dtype), "Forward data type failed")
+        for node in graph.get_nodes():
+            if node.name() not in out_dtype:
+                print(f"Warning: {node.name()}({node.type()}) if not tested!")
+            else:
+                self.verify_node_out_dtype(node, out_dtype[node.name()])
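+        # Illustrative usage, assuming a graph containing a single node named
+        # "relu0"; out_dtype maps each tested node name to its expected
+        # per-output dtypes:
+        #   self.run_graph_test(graph, [aidge_core.dtype.float32],
+        #                       {"relu0": [aidge_core.dtype.float32]})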
+
+    ### TESTING OPERATORS ###
+    # Please ensure test cases are written in alphabetical order!
+
+    def test_Abs_forward_dtype(self):
+        pass
+
+    def test_Add_forward_dtype(self):
+        for in_dtype in ALL_AIDGE_DTYPE:
+            with self.subTest(dtype=f"Add forward_dtype: {in_dtype}"):
+                node = aidge_core.Add(name="add")
+                self.run_node_test(node, [in_dtype, in_dtype], [in_dtype])
+
+    def test_And_forward_dtype(self):
+        pass
+
+    def test_ArgMax_forward_dtype(self):
+        pass
+
+    def test_Atan_forward_dtype(self):
+        pass
+
+    def test_AvgPooling_forward_dtype(self):
+        pass
+
+    def test_BatchNorm_forward_dtype(self):
+        pass
+
+    def test_BitShift_forward_dtype(self):
+        pass
+
+    def test_Cast_forward_dtype(self):
+        for cast_dtype in ALL_AIDGE_DTYPE:
+            for in_dtype in ALL_AIDGE_DTYPE:
+                with self.subTest(dtype=f"Cast[{in_dtype}] forward_dtype:  {cast_dtype}"):
+                    cast = aidge_core.Cast(cast_dtype, name="Cast")
+                    # Whatever input type, expected out type is cast_dtype
+                    self.run_node_test(cast, [in_dtype], [cast_dtype])
+
+    def test_Clip_forward_dtype(self):
+        pass
+
+    def test_Concat_forward_dtype(self):
+        pass
+
+    def test_ConstantOfShape_forward_dtype(self):
+        pass
+
+    def test_Conv_forward_dtype(self):
+        pass
+
+    def test_ConvDepthWise_forward_dtype(self):
+        pass
+
+    def test_ConvTranspose_forward_dtype(self):
+        pass
+
+    def test_CryptoHash_forward_dtype(self):
+        pass
+
+    def test_DepthToSpace_forward_dtype(self):
+        pass
+
+    def test_Div_forward_dtype(self):
+        for in_dtype in ALL_AIDGE_DTYPE:
+            with self.subTest(dtype=f"Div forward_dtype: {in_dtype}"):
+                node = aidge_core.Div(name="Div")
+                self.run_node_test(node, [in_dtype, in_dtype], [in_dtype])
+
+    def test_Equal_forward_dtype(self):
+        pass
+
+    def test_Erf_forward_dtype(self):
+        pass
+
+    def test_Expand_forward_dtype(self):
+        pass
+
+    def test_FC_forward_dtype(self):
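+        # As documented in FC.hpp, FC forwards the data type of its bias
+        # input to the output, hence the int32 output expected for int8
+        # data/weights with an int32 bias.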
+        test_cases = [
+            ("float32", [aidge_core.dtype.float32] * 3, [aidge_core.dtype.float32]),
+            ("int8", [aidge_core.dtype.int8, aidge_core.dtype.int8, aidge_core.dtype.int32], [aidge_core.dtype.int32]),
+        ]
+
+        for name, in_dtype, out_dtype in test_cases:
+            with self.subTest(dtype=name):
+                node = aidge_core.FC(1, 1, name="FC")
+                self.run_node_test(node, in_dtype, out_dtype)
+
+    def test_Flatten_forward_dtype(self):
+        pass
+
+    def test_Fold_forward_dtype(self):
+        pass
+
+    def test_Gather_forward_dtype(self):
+        pass
+
+    def test_GenericOperator_forward_dtype(self):
+        pass
+
+    def test_GlobalAveragePooling_forward_dtype(self):
+        pass
+
+    def test_GridSample_forward_dtype(self):
+        pass
+
+    def test_Heaviside_forward_dtype(self):
+        pass
+
+    def test_ILayerNorm_forward_dtype(self):
+        pass
+
+    def test_Identity_forward_dtype(self):
+        pass
+
+    def test_LRN_forward_dtype(self):
+        pass
+
+    def test_LeakyReLU_forward_dtype(self):
+        pass
+
+    def test_Ln_forward_dtype(self):
+        pass
+
+    def test_MatMul_forward_dtype(self):
+        pass
+
+    def test_MaxPooling_forward_dtype(self):
+        pass
+
+    def test_Memorize_forward_dtype(self):
+        pass
+
+    def test_MetaOperator_forward_dtype(self):
+        pass
+
+    def test_MetaOperatorDefs_forward_dtype(self):
+        pass
+
+    def test_Mod_forward_dtype(self):
+        pass
+
+    def test_Move_forward_dtype(self):
+        pass
+
+    def test_Mul_forward_dtype(self):
+        for in_dtype in ALL_AIDGE_DTYPE:
+            with self.subTest(dtype=f"Mul forward_dtype: {in_dtype}"):
+                node = aidge_core.Mul(name="Mul")
+                self.run_node_test(node, [in_dtype, in_dtype], [in_dtype])
+
+    def test_Pad_forward_dtype(self):
+        pass
+
+    def test_Pop_forward_dtype(self):
+        pass
+
+    def test_Pow_forward_dtype(self):
+        pass
+
+    def test_Producer_forward_dtype(self):
+        pass
+
+    def test_ReLU_forward_dtype(self):
+        for in_dtype in ALL_AIDGE_DTYPE:
+            with self.subTest(dtype=f"ReLU forward_dtype: {in_dtype}"):
+                node = aidge_core.ReLU(name="Relu")
+                self.run_node_test(node, [in_dtype], [in_dtype])
+
+    def test_ReduceMean_forward_dtype(self):
+        pass
+
+    def test_ReduceSum_forward_dtype(self):
+        pass
+
+    def test_Reshape_forward_dtype(self):
+        pass
+
+    def test_Resize_forward_dtype(self):
+        pass
+
+    def test_Round_forward_dtype(self):
+        pass
+
+    def test_Scaling_forward_dtype(self):
+        pass
+
+    def test_Select_forward_dtype(self):
+        pass
+
+    def test_Shape_forward_dtype(self):
+        pass
+
+    def test_ShiftGELU_forward_dtype(self):
+        pass
+
+    def test_ShiftMax_forward_dtype(self):
+        pass
+
+    def test_Sigmoid_forward_dtype(self):
+        pass
+
+    def test_Slice_forward_dtype(self):
+        pass
+
+    def test_Softmax_forward_dtype(self):
+        pass
+
+    def test_Split_forward_dtype(self):
+        pass
+
+    def test_Sqrt_forward_dtype(self):
+        pass
+
+    def test_Squeeze_forward_dtype(self):
+        pass
+
+    def test_Stack_forward_dtype(self):
+        pass
+
+    def test_Sub_forward_dtype(self):
+        for in_dtype in ALL_AIDGE_DTYPE:
+            with self.subTest(dtype=f"Add forward_dtype: {in_dtype}"):
+                node = aidge_core.Sub(name="sub")
+                self.run_node_test(node, [in_dtype, in_dtype], [in_dtype])
+
+    def test_Tanh_forward_dtype(self):
+        pass
+
+    def test_Transpose_forward_dtype(self):
+        pass
+
+    def test_Unfold_forward_dtype(self):
+        pass
+
+    def test_Unsqueeze_forward_dtype(self):
+        pass
+
+    def test_WeightInterleaving_forward_dtype(self):
+        pass
+
+
+    ### TESTING GRAPH ###
+
+    def test_shuffle_net(self):
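+        """ShuffleNet-style block mixing a data path and a shape-computation path.
+
+        Shape always outputs int64, so every node on the shape branch
+        (Gather, Div, Unsqueeze, Concat and the constant Producers) is
+        expected to be int64 regardless of the graph input dtype, while
+        Reshape and Transpose propagate the dtype of their data input.
+        """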
+        # Declaring constant values
+        prod_two_a = aidge_core.Producer(aidge_core.Tensor(np.array(2, dtype=np.int64)), "two_a", constant=True)
+        prod_two_b = aidge_core.Producer(aidge_core.Tensor(np.array(2, dtype=np.int64)), "two_b", constant=True)
+
+        # Declaring operators
+        shape_op_1     = aidge_core.Shape(name="shape_op_1")
+        shape_op_2     = aidge_core.Shape(name="shape_op_2")
+        shape_op_3     = aidge_core.Shape(name="shape_op_3")
+        shape_op_4     = aidge_core.Shape(name="shape_op_4")
+        gather_op_1    = aidge_core.Gather(axis = 0, indices = [0], name="gather_op_1")
+        gather_op_2    = aidge_core.Gather(axis = 0, indices = [1], name="gather_op_2")
+        gather_op_3    = aidge_core.Gather(axis = 0, indices = [2], name="gather_op_3")
+        gather_op_4    = aidge_core.Gather(axis = 0, indices = [3], name="gather_op_4")
+        div_op         = aidge_core.Div(name="div_op")
+
+        u_op_1         = aidge_core.Unsqueeze(axes = [0], name="unsqueeze_op_1")
+        u_op_2         = aidge_core.Unsqueeze(axes = [0], name="unsqueeze_op_2")
+        u_op_3         = aidge_core.Unsqueeze(axes = [0], name="unsqueeze_op_3")
+        u_op_4         = aidge_core.Unsqueeze(axes = [0], name="unsqueeze_op_4")
+        u_op_5         = aidge_core.Unsqueeze(axes = [0], name="unsqueeze_op_5")
+        u_op_6         = aidge_core.Unsqueeze(axes = [0], name="unsqueeze_op_6")
+        u_op_7         = aidge_core.Unsqueeze(axes = [0], name="unsqueeze_op_7")
+        u_op_8         = aidge_core.Unsqueeze(axes = [0], name="unsqueeze_op_8")
+        u_op_9         = aidge_core.Unsqueeze(axes = [0], name="unsqueeze_op_9")
+        concat_op_1    = aidge_core.Concat(5, name="concat_op_1")
+        concat_op_2    = aidge_core.Concat(4, name="concat_op_2")
+        reshape_op_1   = aidge_core.Reshape(name="reshape_op_1")
+        reshape_op_2   = aidge_core.Reshape(name="reshape_op_2")
+        transpose_op_1 = aidge_core.Transpose([0, 2, 1, 3, 4], name="transpose_op_1")
+
+        # Declaring Connectors
+        x = aidge_core.Connector(aidge_core.Identity("Input"))
+        a = aidge_core.Connector(prod_two_a)
+        b = aidge_core.Connector(prod_two_b)
+
+        # Graph creation using functional declaration
+        x1 = shape_op_1(x)
+        x2 = shape_op_2(x)
+        x3 = shape_op_3(x)
+        x4 = shape_op_4(x)
+        n = gather_op_1(x1)
+        c = gather_op_2(x2)
+        h = gather_op_3(x3)
+        w = gather_op_4(x4)
+
+        shape_1 = concat_op_1(u_op_1(n), u_op_2(a), u_op_3(div_op(c, b)), u_op_4(h), u_op_5(w))
+        shape_2 = concat_op_2(u_op_6(n), u_op_7(c), u_op_8(h), u_op_9(w))
+
+        y = reshape_op_2(transpose_op_1(reshape_op_1(x, shape_1)), shape_2)
+
+        shuffle_net_graph = aidge_core.generate_graph([y])
+        for in_dtype in ALL_AIDGE_DTYPE:
+            with self.subTest(dtype=f"ShuffleNet {in_dtype}"):
+                output_dtype = {
+                    "shape_op_1":     [aidge_core.dtype.int64],
+                    "shape_op_2":     [aidge_core.dtype.int64],
+                    "shape_op_3":     [aidge_core.dtype.int64],
+                    "shape_op_4":     [aidge_core.dtype.int64],
+                    "gather_op_1":    [aidge_core.dtype.int64],
+                    "gather_op_3":    [aidge_core.dtype.int64],
+                    "gather_op_2":    [aidge_core.dtype.int64],
+                    "gather_op_4":    [aidge_core.dtype.int64],
+                    "div_op":         [aidge_core.dtype.int64],
+                    "unsqueeze_op_1": [aidge_core.dtype.int64],
+                    "unsqueeze_op_2": [aidge_core.dtype.int64],
+                    "unsqueeze_op_3": [aidge_core.dtype.int64],
+                    "unsqueeze_op_4": [aidge_core.dtype.int64],
+                    "unsqueeze_op_5": [aidge_core.dtype.int64],
+                    "unsqueeze_op_6": [aidge_core.dtype.int64],
+                    "unsqueeze_op_7": [aidge_core.dtype.int64],
+                    "unsqueeze_op_8": [aidge_core.dtype.int64],
+                    "unsqueeze_op_9": [aidge_core.dtype.int64],
+                    "concat_op_1":    [aidge_core.dtype.int64],
+                    "concat_op_2":    [aidge_core.dtype.int64],
+                    "two_a":          [aidge_core.dtype.int64],
+                    "two_b":          [aidge_core.dtype.int64],
+                    "reshape_op_1":   [in_dtype],
+                    "reshape_op_2":   [in_dtype],
+                    "transpose_op_1": [in_dtype],
+                    "Input":          [in_dtype]
+                }
+                self.run_graph_test(shuffle_net_graph, [in_dtype], output_dtype)
+
+if __name__ == '__main__':
+    unittest.main()
-- 
GitLab