From 97819e44bb3c9e373c4929d96419054b358fa039 Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Mon, 24 Jun 2024 11:44:46 +0000
Subject: [PATCH] [Upd] files according to parameter changes [Fix] (tmp) GraphView::forward_dims() if dimensions provided and Tensor already associated

---
 aidge_core/export/node_export.py          |  5 +-
 .../unit_tests/test_operator_binding.py    | 61 +++++++++----------
 aidge_core/unit_tests/test_parameters.py   |  6 +-
 aidge_core/unit_tests/test_tensor.py       |  6 +-
 src/graph/GraphView.cpp                    | 11 +++-
 src/recipes/ConstantFolding.cpp            |  2 +-
 src/recipes/ExplicitCastMove.cpp           |  6 +-
 src/recipes/ExplicitTranspose.cpp          |  4 +-
 src/recipes/FuseBatchNorm.cpp              |  6 +-
 src/recipes/HorizontalTiling.cpp           |  3 +
 src/recipes/LabelGraph.cpp                 |  6 +-
 unit_tests/graph/Test_Matching.cpp         |  2 +-
 12 files changed, 61 insertions(+), 57 deletions(-)

diff --git a/aidge_core/export/node_export.py b/aidge_core/export/node_export.py
index 7262e9a83..5cafcde63 100644
--- a/aidge_core/export/node_export.py
+++ b/aidge_core/export/node_export.py
@@ -20,10 +20,7 @@ class ExportNode(ABC):
         self.node = aidge_node
         self.operator = aidge_node.get_operator()
         self.name = self.node.name()
-        self.attributes = {} # Attributes are auto fetched from aidge operators
-        if isinstance(self.operator, aidge_core.Attributes):
-            for attr_name in self.operator.get_attrs_name():
-                self.attributes[attr_name] = self.operator.get_attr(attr_name)
+        self.attributes = self.operator.attr.dict() # Attributes are auto fetched from aidge operators

         # rename is_leaf ?
         self.is_last = len(self.node.get_children()) == 0
diff --git a/aidge_core/unit_tests/test_operator_binding.py b/aidge_core/unit_tests/test_operator_binding.py
index 164aee726..5b25eb797 100644
--- a/aidge_core/unit_tests/test_operator_binding.py
+++ b/aidge_core/unit_tests/test_operator_binding.py
@@ -30,42 +30,39 @@ class test_operator_binding(unittest.TestCase):
         self.assertNotEqual(gop.name(), "")

     def test_param_bool(self):
-        self.generic_operator.add_attr("bool", True)
-        self.assertEqual(self.generic_operator.has_attr("bool"), True)
-        self.assertEqual(self.generic_operator.get_attr("bool"), True)
-        self.assertEqual(self.generic_operator.get_attr_type("bool"), "bool")
-        self.assertEqual(self.generic_operator.get_attrs_name(), {"bool"})
-        self.generic_operator.del_attr("bool")
-        self.assertEqual(self.generic_operator.has_attr("bool"), False)
-        self.assertEqual(len(self.generic_operator.get_attrs_name()), 0)
+        self.generic_operator.attr.add_attr("bool", True)
+        self.assertEqual(self.generic_operator.attr.has_attr("bool"), True)
+        self.assertEqual(self.generic_operator.attr.get_attr("bool"), True)
+        self.generic_operator.attr.del_attr("bool")
+        self.assertEqual(self.generic_operator.attr.has_attr("bool"), False)

     def test_param_int(self):
-        self.generic_operator.add_attr("int", 1)
-        self.assertEqual(self.generic_operator.get_attr("int"), 1)
+        self.generic_operator.attr.add_attr("int", 1)
+        self.assertEqual(self.generic_operator.attr.get_attr("int"), 1)

     def test_param_float(self):
-        self.generic_operator.add_attr("float", 2.0)
-        self.assertEqual(self.generic_operator.get_attr("float"), 2.0)
+        self.generic_operator.attr.add_attr("float", 2.0)
+        self.assertEqual(self.generic_operator.attr.get_attr("float"), 2.0)

     def test_param_str(self):
-        self.generic_operator.add_attr("str", "value")
-        self.assertEqual(self.generic_operator.get_attr("str"), "value")
+        self.generic_operator.attr.add_attr("str", "value")
+        self.assertEqual(self.generic_operator.attr.get_attr("str"), "value")

     def test_param_l_int(self):
-        self.generic_operator.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
-        self.assertEqual(self.generic_operator.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.generic_operator.attr.add_attr("l_int", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_int"), [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])

     def test_param_l_bool(self):
-        self.generic_operator.add_attr("l_bool", [True, False, False, True])
-        self.assertEqual(self.generic_operator.get_attr("l_bool"), [True, False, False, True])
+        self.generic_operator.attr.add_attr("l_bool", [True, False, False, True])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_bool"), [True, False, False, True])

     def test_param_l_float(self):
-        self.generic_operator.add_attr("l_float", [2.0, 1.0])
-        self.assertEqual(self.generic_operator.get_attr("l_float"), [2.0, 1.0])
+        self.generic_operator.attr.add_attr("l_float", [2.0, 1.0])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_float"), [2.0, 1.0])

     def test_param_l_str(self):
-        self.generic_operator.add_attr("l_str", ["ok"])
-        self.assertEqual(self.generic_operator.get_attr("l_str"), ["ok"])
+        self.generic_operator.attr.add_attr("l_str", ["ok"])
+        self.assertEqual(self.generic_operator.attr.get_attr("l_str"), ["ok"])

     def test_dynamicattribute_binding(self):
         # Check original C++ attributes are binded
@@ -76,20 +73,20 @@ class test_operator_binding(unittest.TestCase):
         self.assertEqual(attrs.get_attr("b"), "test")
         self.assertEqual(attrs.has_attr("c"), True)
         self.assertEqual(attrs.get_attr("c"), [True, False, True])
-        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c"})
+        self.assertEqual(attrs.dict().keys(), {"a", "b", "c"})
         self.assertEqual(attrs.has_attr("d"), False)

         # Add Python attributes
         attrs.add_attr("d", 18.56)
         self.assertEqual(attrs.get_attr("d"), 18.56)
         self.assertEqual(attrs.has_attr("d"), True)
-        self.assertEqual(attrs.get_attrs_name(), {"a", "b", "c", "d"})
+        self.assertEqual(attrs.dict().keys(), {"a", "b", "c", "d"})
         self.assertEqual(attrs.has_attr("e"), False)

         # Check that added Python attribute is accessible in C++
         # Return the value of an attribute named "d" of type float64 (double in C++)
         self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 18.56)
-        attrs.set_attr("d", 23.89)
+        attrs.d = 23.89
         self.assertEqual(aidge_core.test_DynamicAttributes_binding_check(attrs), 23.89)

     def test_forward_dims(self):
@@ -129,18 +126,18 @@ class test_operator_binding(unittest.TestCase):
         myVar = 2
         myBool = True
         # Test dynamic attribute set
-        gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", myVar=myVar).get_operator()
-        gop.myBool = myBool
+        gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", my_var=myVar).get_operator()
+        gop.attr.my_bool = myBool
         # Test variable set by kwargs
-        self.assertEqual(gop.myVar, myVar)
+        self.assertEqual(gop.attr.my_var, myVar)
         # Test set attr
-        self.assertEqual(gop.myBool, myBool)
+        self.assertEqual(gop.attr.my_bool, myBool)

         # Test static attribute set !
         prod = aidge_core.Producer([1]).get_operator()
-        self.assertEqual(prod.Constant, False)
-        prod.Constant = True # By default Constant is False
-        self.assertEqual(prod.Constant, True)
+        self.assertEqual(prod.attr.constant, False)
+        prod.attr.constant = True # By default Constant is False
+        self.assertEqual(prod.attr.constant, True)
diff --git a/aidge_core/unit_tests/test_parameters.py b/aidge_core/unit_tests/test_parameters.py
index e7b16963f..787385a80 100644
--- a/aidge_core/unit_tests/test_parameters.py
+++ b/aidge_core/unit_tests/test_parameters.py
@@ -29,7 +29,7 @@ class test_attributes(unittest.TestCase):
         conv_op = aidge_core.Conv2D(in_channels , out_channels, k_dims).get_operator()
         self.assertEqual(conv_op.in_channels(), in_channels)
         self.assertEqual(conv_op.out_channels(), out_channels)
-        self.assertEqual(conv_op.get_attr("KernelDims"), k_dims)
+        self.assertEqual(conv_op.attr.get_attr("kernel_dims"), k_dims)

     def test_fc(self):
         in_channels = 4
@@ -37,7 +37,7 @@ class test_attributes(unittest.TestCase):
         nb_bias = True
         fc_op = aidge_core.FC(in_channels, out_channels, nb_bias).get_operator()
         self.assertEqual(fc_op.out_channels(), out_channels)
-        self.assertEqual(fc_op.get_attr("NoBias"), nb_bias)
+        self.assertEqual(fc_op.attr.get_attr("no_bias"), nb_bias)

     def test_producer_1D(self):
         dims = [5]
@@ -67,7 +67,7 @@ class test_attributes(unittest.TestCase):
     def test_leaky_relu(self):
         negative_slope = 0.25
         leakyrelu_op = aidge_core.LeakyReLU(negative_slope).get_operator()
-        self.assertEqual(leakyrelu_op.get_attr("NegativeSlope"), negative_slope)
+        self.assertEqual(leakyrelu_op.attr.get_attr("negative_slope"), negative_slope)

 if __name__ == '__main__':
     unittest.main()
diff --git a/aidge_core/unit_tests/test_tensor.py b/aidge_core/unit_tests/test_tensor.py
index d479c98b2..89f8a046a 100644
--- a/aidge_core/unit_tests/test_tensor.py
+++ b/aidge_core/unit_tests/test_tensor.py
@@ -42,7 +42,7 @@ class test_tensor(unittest.TestCase):
         np_array = np.arange(9).reshape(1,1,3,3).astype(np.int32)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Int32)
+        self.assertEqual(t.dtype(), aidge_core.DataType.int32)
         for i_t, i_n in zip(t, np_array.flatten()):
             self.assertTrue(i_t == i_n)
         for i,j in zip(t.dims(), np_array.shape):
@@ -62,7 +62,7 @@ class test_tensor(unittest.TestCase):
         np_array = np.arange(9).reshape(1,1,3,3).astype(np.int64)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Int64)
+        self.assertEqual(t.dtype(), aidge_core.DataType.int64)
         for i_t, i_n in zip(t, np_array.flatten()):
             self.assertTrue(i_t == i_n)
         for i,j in zip(t.dims(), np_array.shape):
@@ -73,7 +73,7 @@ class test_tensor(unittest.TestCase):
         np_array = np.random.rand(1, 1, 3, 3).astype(np.float32)
         # Numpy -> Tensor
         t = aidge_core.Tensor(np_array)
-        self.assertEqual(t.dtype(), aidge_core.DataType.Float32)
+        self.assertEqual(t.dtype(), aidge_core.DataType.float32)
         for i_t, i_n in zip(t, np_array.flatten()):
             self.assertTrue(i_t == i_n) # TODO : May need to change this to a difference
         for i,j in zip(t.dims(), np_array.shape):
diff --git a/src/graph/GraphView.cpp b/src/graph/GraphView.cpp
index 5124d41f5..1581ac843 100644
--- a/src/graph/GraphView.cpp
+++ b/src/graph/GraphView.cpp
@@ -398,14 +398,21 @@ void Aidge::GraphView::compile(const std::string& backend, const Aidge::DataType
 }

 bool Aidge::GraphView::forwardDims(const std::vector<std::vector<Aidge::DimSize_t>>& dims, bool allowDataDependency) {
+    // remove current Data connections and use dummy inputs to propagate dimensions
     // setInputs
     // Link every tensor to the right pointer
     // following parent - children informations
     if (!dims.empty()){
         AIDGE_ASSERT(dims.size() == mInputNodes.size(), "GraphView forwardDims error - Inconsistent number of given dimensions ({}) and graph inputs ({})", dims.size(), mInputNodes.size());
         for (std::size_t i = 0; i < dims.size(); ++i){
-            auto tensor = std::make_shared<Tensor>(dims[i]);
-            mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
+            const auto& currentTensorPtr =
+                std::dynamic_pointer_cast<OperatorTensor>(mInputNodes[i].first->getOperator())->getInput(mInputNodes[i].second);
+            if (currentTensorPtr) { // tensor detected
+                AIDGE_ASSERT(currentTensorPtr->dims() == dims[i], "Tensor of unexpected size provided.")
+            } else {
+                auto tensor = std::make_shared<Tensor>(dims[i]);
+                mInputNodes[i].first->getOperator()->setInput(mInputNodes[i].second, tensor);
+            }
         }
     }

diff --git a/src/recipes/ConstantFolding.cpp b/src/recipes/ConstantFolding.cpp
index 42fb45224..40b0bda76 100644
--- a/src/recipes/ConstantFolding.cpp
+++ b/src/recipes/ConstantFolding.cpp
@@ -44,7 +44,7 @@ void Aidge::constantFolding(std::shared_ptr<GraphView> graph) {
                 }

                 const auto& producer = std::static_pointer_cast<Producer_Op>(input.first->getOperator());
-                if (!producer->getAttr<bool>("Constant")) {
+                if (!producer->constant()) {
                     Log::info("Node {} (of type {}) not foldable because Producer input {} not Constant",
                         node->name(), node->type(), input.first->name());
                     foldable = false;
diff --git a/src/recipes/ExplicitCastMove.cpp b/src/recipes/ExplicitCastMove.cpp
index 7d836c3ac..c860b9e8a 100644
--- a/src/recipes/ExplicitCastMove.cpp
+++ b/src/recipes/ExplicitCastMove.cpp
@@ -73,7 +73,7 @@ void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {

         IOIndex_t inputIdx = 0;
         for (auto parent : node->inputs()) {
-            // TODO: possible optimization: currently, a Cast/Move Operator may
+            // TODO: possible optimization: currently, a Cast/Move Operator may 
             // be added several time to the same output, if it has multiple childs,
             // even if it is the same conversion each time.
             if (parent.first != nullptr) {
@@ -91,8 +91,8 @@ void Aidge::explicitCastMove(std::shared_ptr<GraphView> graph) {

             if (node->type() != Cast_Op::Type && input->dataType() != output->dataType()) {
                 // Change of date type => a Cast operator is required
-                castOp = Cast();
-                castOp->getOperator()->setDataType(output->dataType());
+                castOp = Cast(output->dataType());
+                // castOp->getOperator()->setDataType(output->dataType());
                 castOp->getOperator()->setBackend(device.first, device.second);

                 if (moveOp == nullptr) {
diff --git a/src/recipes/ExplicitTranspose.cpp b/src/recipes/ExplicitTranspose.cpp
index a12e76e9d..7ff971b7e 100644
--- a/src/recipes/ExplicitTranspose.cpp
+++ b/src/recipes/ExplicitTranspose.cpp
@@ -57,7 +57,7 @@ void Aidge::explicitTranspose(std::shared_ptr<GraphView> graph) {

         IOIndex_t inputIdx = 0;
         for (auto parent : node->inputs()) {
-            // TODO: possible optimization: currently, a Transpose Operator may
+            // TODO: possible optimization: currently, a Transpose Operator may 
             // be added several time to the same output, if it has multiple childs,
             // even if it is the same conversion each time.
             if (parent.first != nullptr) {
@@ -97,7 +97,7 @@ void Aidge::explicitTranspose(std::shared_ptr<GraphView> graph) {
                     const auto transpose = getDataFormatTranspose(parentInput->dataFormat(), output->dataFormat());
                     auto transposeOp = std::static_pointer_cast<Transpose_Op>(parent.first->getOperator());
                     transposeOp->setDataFormat(output->dataFormat());
-                    transposeOp->getAttr<std::vector<DimSize_t>>(0) = std::vector<DimSize_t>(transpose.begin(), transpose.end());
+                    transposeOp->outputDimsOrder() = std::vector<DimSize_t>(transpose.begin(), transpose.end());
                 }
             }
             else {
diff --git a/src/recipes/FuseBatchNorm.cpp b/src/recipes/FuseBatchNorm.cpp
index 7c8c9c2ba..aa20a056a 100644
--- a/src/recipes/FuseBatchNorm.cpp
+++ b/src/recipes/FuseBatchNorm.cpp
@@ -62,13 +62,13 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
             std::static_pointer_cast<Conv_Op<2>>(convNode->getOperator());
         convNbOutChannels = convOpPtr->outChannels();
         channelsSize = convOpPtr->inChannels();
-        kernelDims = convOpPtr->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+        kernelDims = convOpPtr->kernelDims();
     }
     else if (convNode->type() == ConvDepthWise_Op<2>::Type) {
         const std::shared_ptr<ConvDepthWise_Op<2>> convOpPtr =
             std::static_pointer_cast<ConvDepthWise_Op<2>>(convNode->getOperator());
         convNbOutChannels = convOpPtr->nbChannels();
-        kernelDims = convOpPtr->getAttr<std::array<DimSize_t, 2>>("KernelDims");
+        kernelDims = convOpPtr->kernelDims();
     }

     AIDGE_ASSERT(kernelDims.size() == 2, "fuseBatchNorm(): only 2D convolutions are supported");
@@ -78,7 +78,7 @@ void Aidge::fuseBatchNorm(std::shared_ptr<Aidge::Node> convNode,
     const Tensor& b_mean = batchOp->getInput(3)->refCastFrom(b_meanBuf, DataType::Float32, "cpu");
     const Tensor& b_var = batchOp->getInput(4)->refCastFrom(b_varBuf, DataType::Float32, "cpu");
-    const float epsilon = batchOp->getAttr<float>("Epsilon");
+    const float epsilon = batchOp->epsilon();

     assert(epsilon > 0.0);
diff --git a/src/recipes/HorizontalTiling.cpp b/src/recipes/HorizontalTiling.cpp
index 989754930..322623518 100644
--- a/src/recipes/HorizontalTiling.cpp
+++ b/src/recipes/HorizontalTiling.cpp
@@ -92,6 +92,7 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         auto slice = Slice();
         auto backend = outTensor->getImpl()->backend();

+        // Create Slice's Starts producer node
         std::vector<std::int64_t> inputDimsStart(inputDims[0].first.size());
         for (std::size_t dim = 0; dim < inputDimsStart.size(); ++dim) {
@@ -139,6 +140,8 @@ std::set<std::shared_ptr<Aidge::Node>> Aidge::getConvHorizontalTiling(const std:
         auto stepsNode = Producer(steps, slice->name() + sliceInputsNames[4]);
         stepsNode -> addChild(slice, 0, 4);

+        // auto slice = Slice(inputDimsStart, inputDimsEnd, usedDims, inputDimsSteps);
+        // auto backend = outTensor->getImpl()->backend();
         slice -> addChild(newNode, 0, 0);
         newNode -> addChild(concat, 0, i);
diff --git a/src/recipes/LabelGraph.cpp b/src/recipes/LabelGraph.cpp
index ac0e6bfe1..75bcd36bf 100644
--- a/src/recipes/LabelGraph.cpp
+++ b/src/recipes/LabelGraph.cpp
@@ -22,7 +22,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {

     if (node->type() == Conv_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<Conv_Op<2>>(node->getOperator());
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<ConvAttr::KernelDims>(), op->template getAttr<ConvAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
         return std::make_shared<Node>(newOp, node->name());
     }

@@ -30,7 +30,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {

     if (node->type() == ConvDepthWise_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<ConvDepthWise_Op<2>>(node->getOperator());
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<ConvDepthWiseAttr::KernelDims>(), op->template getAttr<ConvDepthWiseAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
         return std::make_shared<Node>(newOp, node->name());
     }

@@ -38,7 +38,7 @@ Aidge::NodePtr Aidge::nodeLabel(NodePtr node) {

     if (node->type() == AvgPooling_Op<2>::Type) {
         auto op = std::dynamic_pointer_cast<AvgPooling_Op<2>>(node->getOperator());
-        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->template getAttr<AvgPoolingAttr::KernelDims>(), op->template getAttr<AvgPoolingAttr::StrideDims>());
+        auto newOp = std::make_shared<MaxPooling_Op<2>>(op->kernelDims(), op->strideDims());
         return std::make_shared<Node>(newOp, node->name());
     }

diff --git a/unit_tests/graph/Test_Matching.cpp b/unit_tests/graph/Test_Matching.cpp
index 903eefc0c..6abb4d371 100644
--- a/unit_tests/graph/Test_Matching.cpp
+++ b/unit_tests/graph/Test_Matching.cpp
@@ -323,7 +323,7 @@ TEST_CASE("[core/graph] Matching") {
     gm.addNodeLambda("3x3", [](const NodePtr& node) {
         const std::shared_ptr<Conv_Op<2>> op =
             std::static_pointer_cast<Conv_Op<2>>(node->getOperator());
-        return (op->getAttr<std::array<DimSize_t, 2>>("KernelDims") == std::array<DimSize_t, 2>({3, 3}));
+        return (op->kernelDims() == std::array<DimSize_t, 2>({3, 3}));
     });

     const auto results = gm.match("Pad->Conv[3x3]->ReLU");
-- 
GitLab
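
For reference, the renamed Python attribute API that this patch migrates the unit tests to can be exercised as in the short sketch below. This is a hypothetical usage example, not part of the commit, and assumes an aidge_core build that already contains these changes; the constructors and attribute names simply mirror the tests above.

    import aidge_core

    # Dynamic attributes of a GenericOperator are reached through the `attr` namespace,
    # using snake_case names (kwargs at construction, then attribute- or dict-style access).
    gop = aidge_core.GenericOperator("test", 1, 0, 1, "FictiveName", my_var=2).get_operator()
    gop.attr.my_bool = True
    assert gop.attr.get_attr("my_var") == 2   # dict-style access
    assert gop.attr.my_bool == True           # attribute-style access

    # Static operator attributes follow the same convention, e.g. Conv2D kernel dimensions.
    conv_op = aidge_core.Conv2D(3, 8, [3, 3]).get_operator()
    assert conv_op.attr.get_attr("kernel_dims") == [3, 3]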