diff --git a/aidge_core/unit_tests/test_forwardDType.py b/aidge_core/unit_tests/test_forwardDType.py
new file mode 100644
index 0000000000000000000000000000000000000000..b08f29206e3df1fce9763c1027577c101151992e
--- /dev/null
+++ b/aidge_core/unit_tests/test_forwardDType.py
@@ -0,0 +1,415 @@
+"""
+Copyright (c) 2023 CEA-List
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License 2.0 which is available at
+http://www.eclipse.org/legal/epl-2.0.
+
+SPDX-License-Identifier: EPL-2.0
+"""
+
+import unittest
+import aidge_core
+import numpy as np
+
+# List of all dtypes defined by Aidge
+ALL_AIDGE_DTYPE = [i for i in aidge_core.dtype.__members__.values() if i != aidge_core.dtype.any]
+
+oh_no = []
+
+class test_forwardDType(unittest.TestCase):
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    ### HELPER FUNCTIONS ###
+    def verify_node_out_dtype(self, node, out_dtype):
+        """Helper function to verify the output data types of a node.
+        """
+        operator = node.get_operator()
+        self.assertEqual(operator.nb_outputs(), len(out_dtype), "Error in test design, the number of outputs provided does not correspond to the number of outputs of the operator.")
+        for out_idx in range(operator.nb_outputs()):
+            tensor_dtype = operator.get_output(out_idx).dtype()
+            self.assertEqual(tensor_dtype, out_dtype[out_idx], f"Node {node.name()}({node.type()}) output#{out_idx} is {tensor_dtype}, expected {out_dtype[out_idx]}")
+
+    def run_node_test(self, node, in_dtype, out_dtype):
+        """Run a forward_dtype unit test on a single node.
+
+        :param node: Node whose operator forward_dtype is tested
+        :type node: aidge_core.Node
+        :param in_dtype: List of input types to forward
+        :type in_dtype: List[aidge_core.dtype]
+        :param out_dtype: List of expected output types
+        :type out_dtype: List[aidge_core.dtype]
+        """
+        op = node.get_operator()
+
+        for in_idx in range(len(in_dtype)):
+            in_tensor = aidge_core.Tensor()
+            in_tensor.set_datatype(in_dtype[in_idx])
+            op.set_input(in_idx, in_tensor)
+
+        self.assertTrue(op.forward_dtype(), "Forward data type failed")
+        self.verify_node_out_dtype(node, out_dtype)
+
+    def run_graph_test(self, graph, in_dtype, out_dtype):
+        """Run a forward_dtype unit test on the graph.
+
+        :param graph: GraphView to call forward_dtype on
+        :type graph: aidge_core.GraphView
+        :param in_dtype: List of input types to forward
+        :type in_dtype: List[aidge_core.dtype]
+        :param out_dtype: Dictionary mapping node names to their expected output types
+        :type out_dtype: Dict[str, List[aidge_core.dtype]]
+        """
+        # Loop to create an empty tensor for each operator output.
+        # This replaces a forwardDims!
+        # for node in graph.get_nodes():
+        #     op = node.get_operator()
+        #     if op.type() == aidge_core.ProducerOp.Type and op.attr.constant:
+        #         # Cannot set_output for constant Producer
+        #         continue
+        #     for out_idx in range(op.nb_outputs()):
+        #         out_tensor = aidge_core.Tensor()
+        #         oh_no.append(out_tensor)
+        #         op.set_output(out_idx, out_tensor)
+
+        self.assertTrue(graph.forward_dtype(in_dtype), "Forward data type failed")
+        for node in graph.get_nodes():
+            if node.name() not in out_dtype:
+                print(f"Warning: {node.name()}({node.type()}) is not tested!")
+            else:
+                self.verify_node_out_dtype(node, out_dtype[node.name()])
+
+    ### TESTING_OPERATORS ###
+    # Please ensure test cases are written in alphabetical order!
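+
+    # Minimal graph-level illustration of the helpers above (a sketch: it assumes
+    # aidge_core.sequential to build a one-node graph; the realistic multi-node
+    # case is covered by test_shuffle_net at the end of this file).
+    def test_A_minimal_graph_example(self):
+        graph = aidge_core.sequential([aidge_core.ReLU(name="relu_example")])
+        # forward_dtype() is expected to simply propagate the input data type
+        # to the ReLU output.
+        self.run_graph_test(graph, [aidge_core.dtype.float32],
+                            {"relu_example": [aidge_core.dtype.float32]})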
+
+    def test_Abs_forward_dtype(self):
+        pass
+
+    def test_Add_forward_dtype(self):
+        for in_dtype in ALL_AIDGE_DTYPE:
+            with self.subTest(dtype=f"Add forward_dtype: {in_dtype}"):
+                node = aidge_core.Add(name="add")
+                self.run_node_test(node, [in_dtype, in_dtype], [in_dtype])
+
+    def test_And_forward_dtype(self):
+        pass
+
+    def test_ArgMax_forward_dtype(self):
+        pass
+
+    def test_Atan_forward_dtype(self):
+        pass
+
+    def test_AvgPooling_forward_dtype(self):
+        pass
+
+    def test_BatchNorm_forward_dtype(self):
+        pass
+
+    def test_BitShift_forward_dtype(self):
+        pass
+
+    def test_Cast_forward_dtype(self):
+        for cast_dtype in ALL_AIDGE_DTYPE:
+            for in_dtype in ALL_AIDGE_DTYPE:
+                with self.subTest(dtype=f"Cast[{in_dtype}] forward_dtype: {cast_dtype}"):
+                    cast = aidge_core.Cast(cast_dtype, name="Cast")
+                    # Whatever the input type, the expected output type is cast_dtype
+                    self.run_node_test(cast, [in_dtype], [cast_dtype])
+
+    def test_Clip_forward_dtype(self):
+        pass
+
+    def test_Concat_forward_dtype(self):
+        pass
+
+    def test_ConstantOfShape_forward_dtype(self):
+        pass
+
+    def test_Conv_forward_dtype(self):
+        pass
+
+    def test_ConvDepthWise_forward_dtype(self):
+        pass
+
+    def test_ConvTranspose_forward_dtype(self):
+        pass
+
+    def test_CryptoHash_forward_dtype(self):
+        pass
+
+    def test_DepthToSpace_forward_dtype(self):
+        pass
+
+    def test_Div_forward_dtype(self):
+        for in_dtype in ALL_AIDGE_DTYPE:
+            with self.subTest(dtype=f"Div forward_dtype: {in_dtype}"):
+                node = aidge_core.Div(name="Div")
+                self.run_node_test(node, [in_dtype, in_dtype], [in_dtype])
+
+    def test_Equal_forward_dtype(self):
+        pass
+
+    def test_Erf_forward_dtype(self):
+        pass
+
+    def test_Expand_forward_dtype(self):
+        pass
+
+    def test_FC_forward_dtype(self):
+        test_cases = [
+            ("float32", [aidge_core.dtype.float32] * 3, [aidge_core.dtype.float32]),
+            ("int8", [aidge_core.dtype.int8, aidge_core.dtype.int8, aidge_core.dtype.int32], [aidge_core.dtype.int32]),
+        ]
+
+        for name, in_dtype, out_dtype in test_cases:
+            with self.subTest(dtype=name):
+                node = aidge_core.FC(1, 1, name="FC")
+                self.run_node_test(node, in_dtype, out_dtype)
+
+    def test_Flatten_forward_dtype(self):
+        pass
+
+    def test_Fold_forward_dtype(self):
+        pass
+
+    def test_Gather_forward_dtype(self):
+        pass
+
+    def test_GenericOperator_forward_dtype(self):
+        pass
+
+    def test_GlobalAveragePooling_forward_dtype(self):
+        pass
+
+    def test_GridSample_forward_dtype(self):
+        pass
+
+    def test_Heaviside_forward_dtype(self):
+        pass
+
+    def test_ILayerNorm_forward_dtype(self):
+        pass
+
+    def test_Identity_forward_dtype(self):
+        pass
+
+    def test_LRN_forward_dtype(self):
+        pass
+
+    def test_LeakyReLU_forward_dtype(self):
+        pass
+
+    def test_Ln_forward_dtype(self):
+        pass
+
+    def test_MatMul_forward_dtype(self):
+        pass
+
+    def test_MaxPooling_forward_dtype(self):
+        pass
+
+    def test_Memorize_forward_dtype(self):
+        pass
+
+    def test_MetaOperator_forward_dtype(self):
+        pass
+
+    def test_MetaOperatorDefs_forward_dtype(self):
+        pass
+
+    def test_Mod_forward_dtype(self):
+        pass
+
+    def test_Move_forward_dtype(self):
+        pass
+
+    def test_Mul_forward_dtype(self):
+        for in_dtype in ALL_AIDGE_DTYPE:
+            with self.subTest(dtype=f"Mul forward_dtype: {in_dtype}"):
+                node = aidge_core.Mul(name="Mul")
+                self.run_node_test(node, [in_dtype, in_dtype], [in_dtype])
+
+    def test_Pad_forward_dtype(self):
+        pass
+
+    def test_Pop_forward_dtype(self):
+        pass
+
+    def test_Pow_forward_dtype(self):
+        pass
+
+    def test_Producer_forward_dtype(self):
+        pass
+
+    def test_ReLU_forward_dtype(self):
+        for in_dtype in ALL_AIDGE_DTYPE:
+            with self.subTest(dtype=f"ReLU forward_dtype: {in_dtype}"):
+                node = aidge_core.ReLU(name="Relu")
+                self.run_node_test(node, [in_dtype], [in_dtype])
+
+    def test_ReduceMean_forward_dtype(self):
+        pass
+
+    def test_ReduceSum_forward_dtype(self):
+        pass
+
+    def test_Reshape_forward_dtype(self):
+        pass
+
+    def test_Resize_forward_dtype(self):
+        pass
+
+    def test_Round_forward_dtype(self):
+        pass
+
+    def test_Scaling_forward_dtype(self):
+        pass
+
+    def test_Select_forward_dtype(self):
+        pass
+
+    def test_Shape_forward_dtype(self):
+        pass
+
+    def test_ShiftGELU_forward_dtype(self):
+        pass
+
+    def test_ShiftMax_forward_dtype(self):
+        pass
+
+    def test_Sigmoid_forward_dtype(self):
+        pass
+
+    def test_Slice_forward_dtype(self):
+        pass
+
+    def test_Softmax_forward_dtype(self):
+        pass
+
+    def test_Split_forward_dtype(self):
+        pass
+
+    def test_Sqrt_forward_dtype(self):
+        pass
+
+    def test_Squeeze_forward_dtype(self):
+        pass
+
+    def test_Stack_forward_dtype(self):
+        pass
+
+    def test_Sub_forward_dtype(self):
+        for in_dtype in ALL_AIDGE_DTYPE:
+            with self.subTest(dtype=f"Sub forward_dtype: {in_dtype}"):
+                node = aidge_core.Sub(name="sub")
+                self.run_node_test(node, [in_dtype, in_dtype], [in_dtype])
+
+    def test_Tanh_forward_dtype(self):
+        pass
+
+    def test_Transpose_forward_dtype(self):
+        pass
+
+    def test_Unfold_forward_dtype(self):
+        pass
+
+    def test_Unsqueeze_forward_dtype(self):
+        pass
+
+    def test_WeightInterleaving_forward_dtype(self):
+        pass
+
+
+    ### TESTING GRAPH ###
+
+    def test_shuffle_net(self):
+        # Declaring constant values
+        prod_two_a = aidge_core.Producer(aidge_core.Tensor(np.array(2, dtype=np.int64)), "two_a", constant=True)
+        prod_two_b = aidge_core.Producer(aidge_core.Tensor(np.array(2, dtype=np.int64)), "two_b", constant=True)
+
+        # Declaring operators
+        shape_op_1 = aidge_core.Shape(name="shape_op_1")
+        shape_op_2 = aidge_core.Shape(name="shape_op_2")
+        shape_op_3 = aidge_core.Shape(name="shape_op_3")
+        shape_op_4 = aidge_core.Shape(name="shape_op_4")
+        gather_op_1 = aidge_core.Gather(axis=0, indices=[0], name="gather_op_1")
+        gather_op_2 = aidge_core.Gather(axis=0, indices=[1], name="gather_op_2")
+        gather_op_3 = aidge_core.Gather(axis=0, indices=[2], name="gather_op_3")
+        gather_op_4 = aidge_core.Gather(axis=0, indices=[3], name="gather_op_4")
+        div_op = aidge_core.Div(name="div_op")
+
+        u_op_1 = aidge_core.Unsqueeze(axes=[0], name="unsqueeze_op_1")
+        u_op_2 = aidge_core.Unsqueeze(axes=[0], name="unsqueeze_op_2")
+        u_op_3 = aidge_core.Unsqueeze(axes=[0], name="unsqueeze_op_3")
+        u_op_4 = aidge_core.Unsqueeze(axes=[0], name="unsqueeze_op_4")
+        u_op_5 = aidge_core.Unsqueeze(axes=[0], name="unsqueeze_op_5")
+        u_op_6 = aidge_core.Unsqueeze(axes=[0], name="unsqueeze_op_6")
+        u_op_7 = aidge_core.Unsqueeze(axes=[0], name="unsqueeze_op_7")
+        u_op_8 = aidge_core.Unsqueeze(axes=[0], name="unsqueeze_op_8")
+        u_op_9 = aidge_core.Unsqueeze(axes=[0], name="unsqueeze_op_9")
+        concat_op_1 = aidge_core.Concat(5, name="concat_op_1")
+        concat_op_2 = aidge_core.Concat(4, name="concat_op_2")
+        reshape_op_1 = aidge_core.Reshape(name="reshape_op_1")
+        reshape_op_2 = aidge_core.Reshape(name="reshape_op_2")
+        transpose_op_1 = aidge_core.Transpose([0, 2, 1, 3, 4], name="transpose_op_1")
+
+        # Declaring Connectors
+        x = aidge_core.Connector(aidge_core.Identity("Input"))
+        a = aidge_core.Connector(prod_two_a)
+        b = aidge_core.Connector(prod_two_b)
+
+        # Graph creation using functional declaration
+        x1 = shape_op_1(x)
+        x2 = shape_op_2(x)
+        x3 = shape_op_3(x)
+        x4 = shape_op_4(x)
+        n = gather_op_1(x1)
+        c = gather_op_2(x2)
+        h = gather_op_3(x3)
+        w = gather_op_4(x4)
+
+        shape_1 = concat_op_1(u_op_1(n), u_op_2(a), u_op_3(div_op(c, b)), u_op_4(h), u_op_5(w))
+        shape_2 = concat_op_2(u_op_6(n), u_op_7(c), u_op_8(h), u_op_9(w))
+
+        y = reshape_op_2(transpose_op_1(reshape_op_1(x, shape_1)), shape_2)
+
+        shuffle_net_graph = aidge_core.generate_graph([y])
+        for in_dtype in ALL_AIDGE_DTYPE:
+            with self.subTest(dtype=f"ShuffleNet {in_dtype}"):
+                output_dtype = {
+                    "shape_op_1": [aidge_core.dtype.int64],
+                    "shape_op_2": [aidge_core.dtype.int64],
+                    "shape_op_3": [aidge_core.dtype.int64],
+                    "shape_op_4": [aidge_core.dtype.int64],
+                    "gather_op_1": [aidge_core.dtype.int64],
+                    "gather_op_2": [aidge_core.dtype.int64],
+                    "gather_op_3": [aidge_core.dtype.int64],
+                    "gather_op_4": [aidge_core.dtype.int64],
+                    "div_op": [aidge_core.dtype.int64],
+                    "unsqueeze_op_1": [aidge_core.dtype.int64],
+                    "unsqueeze_op_2": [aidge_core.dtype.int64],
+                    "unsqueeze_op_3": [aidge_core.dtype.int64],
+                    "unsqueeze_op_4": [aidge_core.dtype.int64],
+                    "unsqueeze_op_5": [aidge_core.dtype.int64],
+                    "unsqueeze_op_6": [aidge_core.dtype.int64],
+                    "unsqueeze_op_7": [aidge_core.dtype.int64],
+                    "unsqueeze_op_8": [aidge_core.dtype.int64],
+                    "unsqueeze_op_9": [aidge_core.dtype.int64],
+                    "concat_op_1": [aidge_core.dtype.int64],
+                    "concat_op_2": [aidge_core.dtype.int64],
+                    "two_a": [aidge_core.dtype.int64],
+                    "two_b": [aidge_core.dtype.int64],
+                    "reshape_op_1": [in_dtype],
+                    "reshape_op_2": [in_dtype],
+                    "transpose_op_1": [in_dtype],
+                    "Input": [in_dtype]
+                }
+                self.run_graph_test(shuffle_net_graph, [in_dtype], output_dtype)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/include/aidge/graph/GraphView.hpp b/include/aidge/graph/GraphView.hpp
index c6e3322aea3ee74322571b6619e5b02f857ef12e..081c429e869a7897d9a24b4633f87a7f6efd68e3 100644
--- a/include/aidge/graph/GraphView.hpp
+++ b/include/aidge/graph/GraphView.hpp
@@ -295,6 +295,31 @@ public:
      */
     bool forwardDims(const std::vector<std::vector<DimSize_t>>& dims = {}, bool allowDataDependency = false);
 
+    /**
+     * @brief Helper function to compute and forward the data type throughout the graph.
+     * It tries to infer the best output data type based on the input data types.
+     * To do so, it relies on the ``OperatorTensor::forwardDType()`` method.
+     * A generic version of this method is defined in ``OperatorTensor`` and needs to
+     * be overridden to account for special cases.
+     *
+     * This method does not prevent the user from manually changing the data type of
+     * operators afterwards, but it should be preferred over ``GraphView::setDataType``.
+     *
+     * @param inputTypes A vector of data types; the order of the vector should be the same
+     * as the order of the inputs of the graph.
+     * @return true if the function succeeded in propagating data types throughout the graph.
+     */
+    bool forwardDType(const std::vector<DataType>& inputTypes = {});
+
+
+    /**
+     * @brief Helper that calls ``bool forwardDType(const std::vector<DataType>& inputTypes = {})``.
+     *
+     * @param inputType Data type to set for each input of the graph; it will then be forwarded.
+     * @return true if the function succeeded in propagating the data type throughout the graph.
+     */
+    bool forwardDType(DataType inputType);
+
     /** @brief Set the same backend for each Operator of the GraphView object's Nodes. */
     void setBackend(const std::string& backend, const DeviceIdx_t device = 0) const;
 
     /** @brief Set the same data type for each Operator of the GraphView object's Nodes. 
*/ @@ -613,6 +638,19 @@ private: */ void updateInputsOutputsDelete(NodePtr deletedNode); + /** + * @brief Validates the connectivity and tensor integrity of the graph. + * + * This function ensures that all nodes in the graph are correctly connected + * and that mandatory input tensors are properly defined. It verifies: + * - That each node's input matches the expected output from its connected node. + * - That all mandatory inputs are present and defined. + * - Logs an error and returns `false` if any inconsistency is detected. + * @param checkDefinedTensor if True, check that each tensors are not undefined. + * @return `true` if all connections and tensor states are valid, `false` otherwise. + */ + bool connectionValid(bool checkDefinedTensor = true); + /////////////////////////////////////////////////////// // TOPOLOGY /////////////////////////////////////////////////////// diff --git a/include/aidge/operator/Cast.hpp b/include/aidge/operator/Cast.hpp index cd37e47d84c9a03d047b38d6e5cd4f8e84423d2d..2adbcad3392c3033f78caa923fb3cd0b78f03703 100644 --- a/include/aidge/operator/Cast.hpp +++ b/include/aidge/operator/Cast.hpp @@ -98,6 +98,12 @@ public: */ Cast_Op(const Cast_Op& op); + /** + * @brief Forward the data type. + * @return True if successful, false otherwise. + */ + bool forwardDType() override final; + /** * @brief Clone the operator using its copy constructor. * @return A shared pointer to the cloned operator. diff --git a/include/aidge/operator/Clip.hpp b/include/aidge/operator/Clip.hpp index 886e74ea188b8826fce4b950eae874d47e0194bd..763ba7c292c574614c6cc7deac75e6718e50fc1b 100644 --- a/include/aidge/operator/Clip.hpp +++ b/include/aidge/operator/Clip.hpp @@ -17,6 +17,8 @@ #include <limits> #include "aidge/backend/OperatorImpl.hpp" +#include "aidge/data/Tensor.hpp" + #include "aidge/graph/Node.hpp" #include "aidge/operator/OperatorTensor.hpp" #include "aidge/utils/Registrar.hpp" @@ -117,6 +119,12 @@ public: bool dimsForwarded() const override final; bool forwardDims(bool allowDataDependency = false) override final; + /** + * @brief Forward the data type. + * @return True if successful, false otherwise. + */ + bool forwardDType() override final; + /** * @brief Setter to specify the backend to use. */ @@ -132,13 +140,39 @@ public: * @brief Getter for the minimum clipping value. * @return Reference to the minimum value. */ - inline float& min() const noexcept { return mAttributes->getAttr<ClipAttr::Min>(); } + inline float& min() const noexcept { + if (getInput(1)){ + if(mAttributes->getAttr<ClipAttr::Min>() != std::numeric_limits<float>::lowest()) + { + Log::notice("{} : ignoring non-empty min attribute because input#1 " + "take precedence", + type()); + } + std::shared_ptr<Tensor> fallback; + const auto& minV = mInputs[1]->refCastFrom(fallback, NativeType_v<float>, "cpu"); + mAttributes->getAttr<ClipAttr::Min>() = *(static_cast<float*>(minV.getImpl()->hostPtr())); + } + return mAttributes->getAttr<ClipAttr::Min>(); + } /** * @brief Getter for the maximum clipping value. * @return Reference to the maximum value. 
*/ - inline float& max() const noexcept { return mAttributes->getAttr<ClipAttr::Max>(); } + inline float& max() const noexcept { + if (getInput(2)){ + if(mAttributes->getAttr<ClipAttr::Max>() != std::numeric_limits<float>::max()) + { + Log::notice("{} : ignoring non-empty max attribute because input#2 " + "take precedence", + type()); + } + std::shared_ptr<Tensor> fallback; + const auto& maxV = mInputs[2]->refCastFrom(fallback, NativeType_v<float>, "cpu"); + mAttributes->getAttr<ClipAttr::Max>() = *(static_cast<float*>(maxV.getImpl()->hostPtr())); + } + return mAttributes->getAttr<ClipAttr::Max>(); + } std::set<std::string> getAvailableBackends() const override; diff --git a/include/aidge/operator/FC.hpp b/include/aidge/operator/FC.hpp index 393e640d60934059a9c216a9335a7018388fe9da..e513c3059fa3819ac81f26a373b92de9d3cba3fc 100644 --- a/include/aidge/operator/FC.hpp +++ b/include/aidge/operator/FC.hpp @@ -29,7 +29,7 @@ namespace Aidge { * @brief Description of a Fully Connected (FC) operation on an input Tensor. * * The Fully Connected (FC) operation applies a linear transformation to the input Tensor - * by multiplying it with a weight matrix and optionally adding a bias vector: + * by multiplying it with a weight matrix and optionally adding a bias vector: * - If `bias` is included: * f(x) = x × weights^T + bias * - If `bias` is omitted: @@ -74,7 +74,7 @@ public: * * Copies the attributes and output tensor(s) of the operator, but does not * copy input tensors. The new operator instance has no associated inputs. - * + * * @param op The `FC_Op` instance to copy. */ FC_Op(const FC_Op& op) @@ -114,6 +114,13 @@ public: */ bool forwardDims(bool allowDataDependency = false) override final; + /** + * @brief Forward the data type. + * The output is set to be the same type as the bias input. + * @return True if successful, false otherwise. + */ + bool forwardDType() override final; + /** * @brief Sets the backend for the operator. * diff --git a/include/aidge/operator/MetaOperator.hpp b/include/aidge/operator/MetaOperator.hpp index cbc9cc1180ee0d7ec2d82ad37abc2ec4f1c945d7..ccd1057d7f5b5f2544bcb3e497831f785e3b306e 100644 --- a/include/aidge/operator/MetaOperator.hpp +++ b/include/aidge/operator/MetaOperator.hpp @@ -31,7 +31,7 @@ namespace Aidge { /** * @class MetaOperator_Op * @brief Represents a meta-operator, which is a composition of multiple operators. - * + * * A meta-operator encapsulates a micro-graph of operations, facilitating modularity * and reusability. It extends the functionality of `OperatorTensor` and provides * features such as cloning, dynamic input association, and custom backend support. @@ -55,7 +55,7 @@ private: public: /** * @brief Constructor for MetaOperator_Op. - * + * * @param type The type of the meta-operator. * @param graph The micro-graph defining the meta-operator. * @param forcedInputsCategory Optional input categories to override default behavior. @@ -64,16 +64,16 @@ public: /** * @brief Copy constructor. - * + * * Copies the operator's attributes and output tensors, but not its input tensors. - * + * * @param op The operator to copy. */ MetaOperator_Op(const MetaOperator_Op& op); /** * @brief Set the node for scheduling. - * + * * @param node The node to be used as the upper node in the scheduling hierarchy. */ inline void setUpperNode(std::shared_ptr<Node> node) { @@ -82,16 +82,16 @@ public: /** * @brief Clone this meta-operator. - * + * * Uses the copy constructor to create a new instance with identical attributes. 
- * + * * @return A shared pointer to the cloned operator. */ std::shared_ptr<Operator> clone() const override; /** * @brief Retrieve the micro-graph defining the meta-operator. - * + * * @return A shared pointer to the micro-graph. */ inline const std::shared_ptr<GraphView>& getMicroGraph() const noexcept { @@ -100,7 +100,7 @@ public: /** * @brief Retrieve the scheduler for the micro-graph. - * + * * @return A shared pointer to the scheduler. */ inline const std::shared_ptr<SequentialScheduler>& getMicroGraphScheduler() const noexcept { @@ -109,7 +109,7 @@ public: /** * @brief Associate an input tensor to the operator. - * + * * @param inputIdx Index of the input tensor. * @param data Shared pointer to the data tensor. */ @@ -117,7 +117,7 @@ public: /** * @brief Set an input tensor for the operator. - * + * * @param inputIdx Index of the input tensor. * @param data Shared pointer to the data tensor. */ @@ -131,7 +131,7 @@ public: /** * @brief Forward the dimensions through the micro-graph. - * + * * @param allowDataDependency If true, allows data-dependent operations during forwarding. * @return True if the operation succeeded, false otherwise. */ @@ -143,16 +143,31 @@ public: return false; } + /** + * @brief Forward the data type through the micro-graph. + * + * @return True if the operation succeeded, false otherwise. + */ + bool forwardDType() override final { + if (inputsAssociated(false)) { + // Forward dims of micro-graph + return mGraph->forwardDType({}); + }else{ + Log::warn("No input associated to metaoperator."); + } + return false; + } + /** * @brief Retrieve the backend for the operator. - * + * * @return The name of the backend. */ std::string backend() const noexcept override; /** * @brief Set the backend for the operator. - * + * * @param name The name of the backend. * @param device The device index. */ @@ -160,16 +175,16 @@ public: /** * @brief Get the available backends for the operator. - * + * * @return A set of available backend names. */ std::set<std::string> getAvailableBackends() const override; /** * @brief Set the data type for the operator. - * + * * This propagates the data type change to the micro-graph. - * + * * @param datatype The new data type. */ void setDataType(const DataType &datatype) const override { @@ -181,7 +196,7 @@ public: /** * @brief Retrieve the dynamic attributes of the operator. - * + * * @return A shared pointer to the attributes. */ inline std::shared_ptr<Attributes> attributes() const override { return mAttributes; } @@ -213,7 +228,7 @@ public: void backward() override; /** * @brief Check if the operator is atomic. - * + * * @return False, as meta-operators are inherently non-atomic. */ inline bool isAtomic() const noexcept override final { return false; } @@ -222,7 +237,7 @@ public: /** * @brief Helper function to create a MetaOperator node. - * + * * @param type The type of the meta-operator. * @param graph The micro-graph defining the meta-operator. * @param forcedInputsCategory Optional input categories to override default behavior. diff --git a/include/aidge/operator/OperatorTensor.hpp b/include/aidge/operator/OperatorTensor.hpp index 1b2035222d18eaec043d770f36806da922370f87..ae66a3081eeccd2ed397459867de3c6727285c3e 100644 --- a/include/aidge/operator/OperatorTensor.hpp +++ b/include/aidge/operator/OperatorTensor.hpp @@ -172,6 +172,16 @@ public: */ virtual bool forwardDims(bool allowDataDependency = false); + /** + * @brief Computes the data type of the operator's output tensor based on input data type. 
+     *
+     * The default implementation requires every connected Data input to share the
+     * data type of input #0 (Param and OptionalParam inputs are skipped, as they may
+     * legitimately use a different data type than the data inputs) and then sets
+     * every output to the data type of input #0. Operators with different
+     * type-propagation rules must override this method.
+     *
+     * @return True if data types are successfully computed, false otherwise.
+     */
+    virtual bool forwardDType();
+
     /**
      * @brief Checks if dimensions have been successfully forwarded.
      * @return True if dimensions are forwarded, false otherwise.
@@ -189,7 +199,7 @@ public:
 
     /**
      * @brief Sets the data type of the operator's tensors.
-     * @warning Sets all outputs but only inputs of category 
+     * @warning Sets all outputs but only inputs of category
      * @code InputCategory::Param @endcode & @code InputCategory::OptionnalParam @endcode
      * @param dataType Data type to set.
      */
diff --git a/include/aidge/operator/Reshape.hpp b/include/aidge/operator/Reshape.hpp
index c93ef09c9dd35ca887b0b491bd8c1177dbbb35e1..4d0e21e27696262924fd5bd99673aea99081b750 100644
--- a/include/aidge/operator/Reshape.hpp
+++ b/include/aidge/operator/Reshape.hpp
@@ -120,6 +120,12 @@ public:
      */
     bool forwardDims(bool allowDataDependency = false) override final;
 
+    /**
+     * @brief Forward the data type.
+     * The output data type is the same as that of input 0.
+     * @return True if successful, false otherwise.
+     */
+    bool forwardDType() override final;
     /**
      * @brief Set the backend for the Reshape operator.
      * @param[in] name Name of the backend.
diff --git a/include/aidge/operator/Shape.hpp b/include/aidge/operator/Shape.hpp
index 4028c4041584833f14a4fa4db0f944dca2c2f035..3d5d02f917c2465d587498ae65a1f6d6308f4256 100644
--- a/include/aidge/operator/Shape.hpp
+++ b/include/aidge/operator/Shape.hpp
@@ -108,6 +108,12 @@ public:
      */
     bool forwardDims(bool /*allowDataDependency*/ = false) override final;
 
+    /**
+     * @brief Forward the data type.
+     * @return True if successful, false otherwise.
+     */
+    bool forwardDType() override final;
+
     /**
      * @brief Set the backend for the Shape operator.
      * @param[in] name Name of the backend.
diff --git a/include/aidge/operator/Unsqueeze.hpp b/include/aidge/operator/Unsqueeze.hpp
index 27b3851fc7b741955889f7119bdf2b829918950a..4a66c37b2d56962259e9e00c75987a54194f2eb4 100644
--- a/include/aidge/operator/Unsqueeze.hpp
+++ b/include/aidge/operator/Unsqueeze.hpp
@@ -105,6 +105,7 @@ public:
      * @brief Compute dimensions for the output Tensor
      */
     bool forwardDims(bool allowDataDependency = false) override final;
+
     bool dimsForwarded() const override final;
 
     void setBackend(const std::string &name,
diff --git a/python_binding/graph/pybind_GraphView.cpp b/python_binding/graph/pybind_GraphView.cpp
index abb1a9eca0bc3edb1ee0faaecb9f6cd9bc52e167..d1b99c305d0e067a74c13a33cde062b2c6f2ddfa 100644
--- a/python_binding/graph/pybind_GraphView.cpp
+++ b/python_binding/graph/pybind_GraphView.cpp
@@ -80,7 +80,7 @@ void init_GraphView(py::module& m) {
     :param include_learnable_parameters: include non-data inputs, like weights and biases, default True.
:type include_learnable_parameters: bool, optional )mydelimiter") - + .def("insert_parent", &GraphView::insertParent, py::arg("child_node"), py::arg("new_parent_node"), py::arg("child_input_tensor_idx"), py::arg("new_parent_input_tensor_idx"), py::arg("new_parent_output_tensor_idx")) .def("add_child", (void (GraphView::*)(std::shared_ptr<Node>, std::shared_ptr<Node>, @@ -128,6 +128,8 @@ void init_GraphView(py::module& m) { .def("clone", &GraphView::clone) .def("get_nodes", &GraphView::getNodes) .def("get_node", &GraphView::getNode, py::arg("node_name")) + .def("forward_dtype", (bool(GraphView::*)(const std::vector<DataType>&)) &GraphView::forwardDType, py::arg("dtypes") = std::vector<DataType>()) + .def("forward_dtype", (bool(GraphView::*)(DataType)) &GraphView::forwardDType, py::arg("dtype")) .def("forward_dims", &GraphView::forwardDims, py::arg("dims")=std::vector<std::vector<DimSize_t>>(), py::arg("allow_data_dependency") = false, R"mydelimiter( Compute and propagate Tensor dimensions through the GraphView. @@ -209,7 +211,7 @@ void init_GraphView(py::module& m) { :param dims: input dimension to forward :type dims: List[List[Int]] - + )mydelimiter") .def("__call__", &GraphView::operator(), py::arg("connectors")) .def("set_datatype", &GraphView::setDataType, py::arg("datatype")) diff --git a/python_binding/operator/pybind_OperatorTensor.cpp b/python_binding/operator/pybind_OperatorTensor.cpp index 2602e115d43d805451aa9f0836c8151b2cd4b109..350c0958a478ed699e393e815f01eeac177e92fc 100644 --- a/python_binding/operator/pybind_OperatorTensor.cpp +++ b/python_binding/operator/pybind_OperatorTensor.cpp @@ -33,6 +33,7 @@ void init_OperatorTensor(py::module& m){ .def("set_output", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&) const) &OperatorTensor::setOutput, py::arg("outputIdx"), py::arg("data")) .def("set_input", (void (OperatorTensor::*)(const IOIndex_t, const std::shared_ptr<Data>&)) &OperatorTensor::setInput, py::arg("outputIdx"), py::arg("data")) .def("forward_dims", &OperatorTensor::forwardDims, py::arg("allow_data_dependency") = false) + .def("forward_dtype", &OperatorTensor::forwardDType) .def("dims_forwarded", &OperatorTensor::dimsForwarded) ; } diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp index 3f146c74557936d025a25e74c30140e2ae40da75..9f66caca9386e3191f27846ad37bfc9bcfb24d49 100644 --- a/src/graph/GraphView.cpp +++ b/src/graph/GraphView.cpp @@ -450,6 +450,160 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType forwardDims(dims); } +bool Aidge::GraphView::connectionValid(bool checkDefinedTensor){ + // Ensure every node in the graph is correctly connected + Log::debug("Verifying graph connections and tensor validity"); + for (std::shared_ptr<Node> nodePtr : getNodes()) { + for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) { + std::pair<std::shared_ptr<Node>, IOIndex_t> inputI = nodePtr->input(i); + if (inputI.first) { + if (nodePtr->getOperator()->getRawInput(i) != inputI.first->getOperator()->getRawOutput(inputI.second)) { + Log::error("Connection mismatch: Input#{} of node [\033[1m\033[3m{}\033[0m (\033[1m\033[3m{}\033[0m)] -> Output#{} of node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]", + i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type()); + return false; + } + } else if (nodePtr->inputCategory(i) != InputCategory::OptionalData && + nodePtr->inputCategory(i) != InputCategory::OptionalParam) { + if (!nodePtr->getOperator()->getRawInput(i)) { + 
Log::error("Missing mandatory input#{} for node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]", + i, nodePtr->name(), nodePtr->type()); + return false; + } + if (checkDefinedTensor && std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->undefined()) { + Log::error("Undefined mandatory input#{} for node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]", + i, nodePtr->name(), nodePtr->type()); + return false; + } + } + } + } + return true; +} + +bool Aidge::GraphView::forwardDType(DataType inputType){ + return forwardDType(std::vector<DataType>(getNbDataInputs(), inputType)); +} + +bool Aidge::GraphView::forwardDType(const std::vector<Aidge::DataType>& inputTypes){ + if (!inputTypes.empty()){ + auto msg = fmt::format("Manually setting GraphView input data type with provided parameters:"); + for (std::size_t i = 0; i< inputTypes.size(); ++i) + msg = fmt::format("{}\n\t* input#{} {}", msg, i, inputTypes[i]); + Log::info("{}", msg); + + Log::debug("Validating input dtype against existing graph inputs"); + std::size_t i = 0; + for (auto& input : mInputNodes) { + const auto& currentTensorPtr = + std::dynamic_pointer_cast<OperatorTensor>(input.first->getOperator())->getInput(input.second); + if (i < inputTypes.size()) { + if (!currentTensorPtr) { + Log::debug("Creating new tensor for input#{} with dtype {}", i, inputTypes[i]); + auto tensor = std::make_shared<Tensor>(inputTypes[i], DataFormat::Default); + input.first->getOperator()->setInput(input.second, tensor); + }else{ + currentTensorPtr->setDataType(inputTypes[i]); + } + } + else { + const bool optional = (input.first->inputCategory(input.second) == InputCategory::OptionalData + || input.first->inputCategory(input.second) == InputCategory::OptionalParam); + + if (currentTensorPtr) { + Log::debug("Using existing data type {} for graph input#{} (matching input#{} of node [\033[1m\033[3m{}\033[0m] - [\033[1m\033[3m{}\033[0m])", + currentTensorPtr->dataType(), i, input.second, input.first->name(), input.first->type()); + } + else if (!optional) { + Log::warn("Missing data type for mandatory graph input#{} (matching input#{} of node [\033[1m\033[3m{}\033[0m] - [\033[1m\033[3m{}\033[0m])", + i, input.second, input.first->name(), input.first->type()); + } + } + ++i; + } + } + + if(!connectionValid(false)) return false; + + // INITIALIZING Open and Close sets + std::set<std::shared_ptr<Node>> close; // Already treated nodes + std::set<std::shared_ptr<Node>> open = inputNodes(); // Nodes to treat + for (const auto& nodePtr : getNodes()) { + if (nodePtr->type() == Producer_Op::Type) { + // Producers dType is set by user + // So it is considered already treated + close.insert(nodePtr); + // Producers childs are put in open list + for (const auto& child : nodePtr->getChildren()) { + if (inView(child)) open.insert(child); + } + } + } + do{ + Log::debug("List of node to forward data type:"); + for(auto node : open){ + Log::debug("\t- Node {} (of type {})", node->name(), node->type()); + } + std::set<std::shared_ptr<Node>> newOpen; + for (const auto& nodePtr : open) { + if (nodePtr->getOperator()->operatorType() != OperatorType::Tensor) { + AIDGE_THROW_OR_ABORT(std::runtime_error, "Node {} (of type {}) as it is not an OperatorTensor. 
ForwardDType is currently only supported for OperatorTensor.", nodePtr->name(), nodePtr->type()); + } + const auto op = std::static_pointer_cast<OperatorTensor>(nodePtr->getOperator()); + bool anyParent = false; + bool parentsForwarded = true; + for (const auto& parent : nodePtr->getParents()) { + if (parent != nullptr && inView(parent) && close.find(parent) == close.end()) { + Log::debug("Data type not forwarded for parent (node {} (of type {})) of node {} (of type {})", + parent->name(), parent->type(), nodePtr->name(), nodePtr->type()); + parentsForwarded = false; + } + else { + anyParent = true; + } + } + // Special rule for Memorize_Op, which only requires one parent + // to have its dtype forwarded. This avoids circular dependency. + if (nodePtr->type() == Memorize_Op::Type && anyParent) { + parentsForwarded = true; + } + if (parentsForwarded && op->forwardDType()) { + Log::debug("Data type forwarded for node {} (of type {})", + nodePtr->name(), nodePtr->type()); + + // Recompute every time, even if it was already computed in a + // previous call of forwardDType(), as the graph may have changed! + close.insert(nodePtr); + for (const auto& child : nodePtr->getChildren()) { + if (inView(child) && close.find(child) == close.end()) { + newOpen.insert(child); + } + } + } + else { + if (parentsForwarded) { + Log::error("Unable to forward data type for node {} (of type {})", nodePtr->name(), nodePtr->type()); + + } + Log::debug("Adding back node {} (of type {}) to the list of nodes to forward data type", nodePtr->name(), nodePtr->type()); + newOpen.insert(nodePtr); + } + + } + if (newOpen == open) { + // We are stuck! + std::vector<std::string> nodesName; + std::transform(newOpen.begin(), newOpen.end(), + std::back_inserter(nodesName), + [](auto val){ return val->name() + " (" + val->type() + ")"; }); + + Log::warn("Unable to forward data type (circular dependency and/or wrong dimensions and/or data dependent dimension?). 
Unable to compute output data type for nodes {}.", nodesName); + return false; + } + open.swap(newOpen); + }while(!open.empty()); + return open.empty(); +} + bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>>& dims, bool allowDataDependency) { Log::debug("Starting dimension forward propagation for GraphView"); // remove current Data connections and use dummy inputs to propagate dimensions @@ -498,32 +652,7 @@ bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_ } } - // Ensure every node in the graph is correctly connected - Log::debug("Verifying graph connections and tensor validity"); - for (std::shared_ptr<Node> nodePtr : getNodes()) { - for (IOIndex_t i = 0; i < nodePtr->nbInputs(); ++i) { - std::pair<std::shared_ptr<Node>, IOIndex_t> inputI = nodePtr->input(i); - if (inputI.first) { - if (nodePtr->getOperator()->getRawInput(i) != inputI.first->getOperator()->getRawOutput(inputI.second)) { - Log::error("Connection mismatch: Input#{} of node [\033[1m\033[3m{}\033[0m (\033[1m\033[3m{}\033[0m)] -> Output#{} of node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]", - i, nodePtr->name(), nodePtr->type(), inputI.second, inputI.first->name(), inputI.first->type()); - return false; - } - } else if (nodePtr->inputCategory(i) != InputCategory::OptionalData && - nodePtr->inputCategory(i) != InputCategory::OptionalParam) { - if (!nodePtr->getOperator()->getRawInput(i)) { - Log::error("Missing mandatory input#{} for node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]", - i, nodePtr->name(), nodePtr->type()); - return false; - } - if (std::static_pointer_cast<Tensor>(nodePtr->getOperator()->getRawInput(i))->undefined()) { - Log::error("Undefined mandatory input#{} for node [\033[1m\033[3m{}\033[0m - (\033[1m\033[3m{}\033[0m)]", - i, nodePtr->name(), nodePtr->type()); - return false; - } - } - } - } + if(!connectionValid()) return false; Log::debug("Initializing dimension propagation"); // Establish initial list of dims forwardable nodes: graph input node + Producers childs diff --git a/src/operator/Cast.cpp b/src/operator/Cast.cpp index 128868dcd3f39c16316b67a09abe471c47b2df33..0f64977335fd30a87648c91e4d701469749afffc 100644 --- a/src/operator/Cast.cpp +++ b/src/operator/Cast.cpp @@ -37,6 +37,11 @@ Cast_Op::Cast_Op(const DataType targetType) mOutputs[0]->setDataType(targetType); } +bool Aidge::Cast_Op::forwardDType(){ + mOutputs[0]->setDataType(mAttributes->getAttr<CastAttr::TargetType>()); + return true; +} + Cast_Op::Cast_Op(const Cast_Op& op) : OperatorTensor(op), mAttributes(op.mAttributes) diff --git a/src/operator/Clip.cpp b/src/operator/Clip.cpp index 87ac105daf249ef1888307d5cda14a2e432d0aaf..c898837a7caeead551c9da1fcaca270aa6d1f599 100644 --- a/src/operator/Clip.cpp +++ b/src/operator/Clip.cpp @@ -9,8 +9,6 @@ * ********************************************************************************/ -#include "aidge/operator/Clip.hpp" - #include <memory> #include <string> @@ -50,55 +48,26 @@ bool Clip_Op::dimsForwarded() const { } -bool Clip_Op::forwardDims(bool allowDataDependency) +bool Aidge::Clip_Op::forwardDims(bool /*allowDataDependency*/) { - if (getInput(1) ) - { - if( this->min() != std::numeric_limits<float>::lowest()) - { - Log::notice("{} : ignoring non-empty min attribute because input#1 " - "take precedence", - type()); - } - if (!allowDataDependency) { - Log::warn("{} : unable to forwardDims() because output dims are data " - "dependent on input#1", - type()); - return false; - } - std::shared_ptr<Tensor> 
fallback; - const auto& minV = mInputs[1]->refCastFrom(fallback, NativeType_v<float>, "cpu"); - this->min() = *(static_cast<float*>(minV.getImpl()->hostPtr())); - } - if (getInput(2)) - { - if( this->max() != std::numeric_limits<float>::max()) - { - Log::notice("{} : ignoring non-empty max attribute because input#2 " - "take precedence", - type()); - } - if (!allowDataDependency) { - Log::warn("{} : unable to forwardDims() because output dims are data " - "dependent on input#2", - type()); - return false; - } - std::shared_ptr<Tensor> fallback; - const auto& maxV = mInputs[2]->refCastFrom(fallback, NativeType_v<float>, "cpu"); - this->max() = *(static_cast<float*>(maxV.getImpl()->hostPtr())); - } - if (!inputsAssociated(false)) { + if(!getInput(0)){ + Log::error("Clip_Op: Input#0 is not connected, failed to forward dimensions."); return false; } - else if ((getInput(1) && !getInput(1)->empty()) || (getInput(2) && !getInput(2)->empty())) - { - AIDGE_THROW_OR_ABORT(std::runtime_error,"Expected Input#1 and Input#2 to be scalar (Tensors of empty shapes)"); - } mOutputs[0] -> resize(getInput(0)->dims()); return true; } -void Clip_Op::setBackend(const std::string& name, DeviceIdx_t device) { + +bool Aidge::Clip_Op::forwardDType(){ + if (getInput(0)) { + mOutputs[0]->setDataType(getInput(0)->dataType()); + return true; + } + Log::warn("Clip_Op: No Input#0 associated, failed to forward data type."); + return false; +} + +void Aidge::Clip_Op::setBackend(const std::string& name, Aidge::DeviceIdx_t device) { mImpl = Registrar<Clip_Op>::create(name)(*this); mOutputs[0]->setBackend(name, device); } diff --git a/src/operator/FC.cpp b/src/operator/FC.cpp index dd3ed7aba65cf1875d691d9bc2c8c94bb03856c7..07208b5221326eaf1c0cfd8829c97dc4543c659b 100644 --- a/src/operator/FC.cpp +++ b/src/operator/FC.cpp @@ -40,6 +40,19 @@ void Aidge::FC_Op::associateInput(const Aidge::IOIndex_t inputIdx, const std::sh mInputs[inputIdx]->resize({1, getInput(inputIdx)->size()}); } +bool Aidge::FC_Op::forwardDType(){ + // Current naive forwarDType based on bias. + // Bias is optional so this will not always work + // But is good enough for now. + // Feel free to upgrade the function! + if (getInput(2)) { + mOutputs[0]->setDataType(getInput(2)->dataType()); + return true; + } + Log::notice("FC_Op: No bias associated, failed to forward data type."); + return false; +} + bool Aidge::FC_Op::forwardDims(bool /*allowDataDependency*/) { if (inputsAssociated()) { // first check weight since it defines inChannels and outChannels diff --git a/src/operator/OperatorTensor.cpp b/src/operator/OperatorTensor.cpp index 1c5b0a0d396dd7dcab98ccae685d747274165463..bbe6d506d48e4c45be39179ea467a0dfaf74c900 100644 --- a/src/operator/OperatorTensor.cpp +++ b/src/operator/OperatorTensor.cpp @@ -42,7 +42,7 @@ Aidge::OperatorTensor::OperatorTensor(const OperatorTensor& other) mOutputs(std::vector<std::shared_ptr<Tensor>>(other.nbOutputs())) { for (std::size_t i = 0; i < static_cast<std::size_t>(nbOutputs()); ++i) { // The characteristics of the output tensors are copied for two reasons: - // - Consistency with the operator: the output tensors backend should + // - Consistency with the operator: the output tensors backend should // match the operator backend, which is always copied. // - The user would expect that the data type and format are copied as // well. 
@@ -169,6 +169,44 @@ bool Aidge::OperatorTensor::forwardDims(bool /*allowDataDependency*/) {
     return false;
 }
 
+bool Aidge::OperatorTensor::forwardDType(){
+    Log::debug("Running default forwardDType for operator {}",
+               type());
+
+    if (inputsAssociated(false)) {
+        const auto expectedDType = getInput(0)->dataType();
+        for (std::size_t i = 1; i < nbInputs(); ++i) {
+            if (inputCategory(i) == InputCategory::OptionalParam
+                || inputCategory(i) == InputCategory::Param){
+                // Param inputs may have a different dtype than data inputs
+                continue;
+            }
+            if (inputCategory(i) == InputCategory::OptionalData
+                && !getInput(i)){
+                // If OptionalData is not set, skip it
+                continue;
+            }
+
+            if (expectedDType != getInput(i)->dataType()) {
+                Log::info("{} operator's inputs should have the same datatype: expected {} (input #0), given {} (input #{})",
+                          type(), expectedDType, getInput(i)->dataType(), i);
+                return false;
+            }
+        }
+
+        for (std::size_t o = 0; o < nbOutputs(); ++o) {
+            Log::debug("Setting output#{} dtype to {}",
+                       o, expectedDType);
+            mOutputs[o]->setDataType(expectedDType);
+        }
+        return true;
+    } else {
+        Log::info("Inputs are not associated, failed to forward data type.");
+    }
+
+    return false;
+}
+
 bool Aidge::OperatorTensor::dimsForwarded() const {
     bool forwarded = true;
     // check both inputs and outputs have been filled
diff --git a/src/operator/Reshape.cpp b/src/operator/Reshape.cpp
index b12fd486d16beb0a676e38cfdf808fa71996a5ba..3df66f293186dc88057dced68640420b265bb3d2 100644
--- a/src/operator/Reshape.cpp
+++ b/src/operator/Reshape.cpp
@@ -59,6 +59,17 @@ bool Aidge::Reshape_Op::dimsForwarded() const {
     return OperatorTensor::dimsForwarded();
 }
 
+bool Aidge::Reshape_Op::forwardDType(){
+    // Note: this override is required because the shape input is OptionalData,
+    // so the default implementation would fail whenever
+    // input[0] dtype != input[1] dtype.
+    if (inputsAssociated(false)) {
+        mOutputs[0]->setDataType(getInput(0)->dataType());
+        return true;
+    }
+    Log::notice("Reshape_Op: No input associated, failed to forward data type.");
+    return false;
+}
 
 bool Aidge::Reshape_Op::forwardDims(bool allowDataDependency) {
     if (inputsAssociated()) {
diff --git a/src/operator/Shape.cpp b/src/operator/Shape.cpp
index 4db4704739b362426adb1831c1c95b3796aa918a..4791a14a5e4fa10a58bceccc46e537df8ac63cd0 100644
--- a/src/operator/Shape.cpp
+++ b/src/operator/Shape.cpp
@@ -49,7 +49,10 @@ Aidge::Shape_Op::Shape_Op(const Aidge::Shape_Op& op)
 std::shared_ptr<Aidge::Operator> Aidge::Shape_Op::clone() const {
     return std::make_shared<Shape_Op>(*this);
 }
-
+bool Aidge::Shape_Op::forwardDType(){
+    mOutputs[0]->setDataType(DataType::Int64);
+    return true;
+}
 bool Aidge::Shape_Op::forwardDims(bool /*allowDataDependency*/) {
     if (inputsAssociated()) {
         if (this->start() < 0)