From 1aecd6c377800e7b6bc13f75b54c9eb443ebfbbc Mon Sep 17 00:00:00 2001
From: Gallasko <gallasko@gmail.com>
Date: Wed, 26 Mar 2025 13:14:07 +0100
Subject: [PATCH] fix: Better control over random number generation in the
 model weights during unit tests

---
 aidge_export_cpp/unit_tests/test_export.py | 105 ++++++++++++++-------
 1 file changed, 69 insertions(+), 36 deletions(-)

diff --git a/aidge_export_cpp/unit_tests/test_export.py b/aidge_export_cpp/unit_tests/test_export.py
index c5fa99c..20421ad 100644
--- a/aidge_export_cpp/unit_tests/test_export.py
+++ b/aidge_export_cpp/unit_tests/test_export.py
@@ -39,12 +39,45 @@ def normalize_random_tensor(randList):
     return aidge_core.Tensor(randList.astype(np.float32))
 
+import numpy as np
+import operator
+from functools import reduce
+
+def np_init(shape, dtype=np.float32):
+    """
+    Generates a NumPy array with the given shape, filled with random values between -1 and 1
+    with a step of 0.1.
+
+    :param shape: Tuple of dimensions for the array
+    :param dtype: Data type of the output array (default: np.float32)
+    :return: A NumPy array with the given shape and dtype
+    """
+    total_elements = reduce(operator.mul, shape, 1)
+    data = (np.random.randint(0, 21, size=total_elements) - 10) / 10.0
+    return data.reshape(shape).astype(dtype)
+
 def unit_test_export(graph_view, op_name, in_dims):
+    # Initialize parameters (weights and biases)
+    graph_view.compile("cpu", aidge_core.dtype.float32, dims=in_dims)
+
+    for node in graph_view.get_nodes():
+        if node.type() == "Producer":
+            prod_op = node.get_operator()
+            value = prod_op.get_output(0)
+
+            # rand_tensor = aidge_core.Tensor(np_init(value.dims()))
+            # rand_tensor.set_backend(value.backend())
+            # value = rand_tensor
+
+            print(value)
+
+            aidge_core.constant_filler(value, 0.01)
+
+
     scheduler = aidge_core.SequentialScheduler(graph_view)
 
-    # in_tensor = [aidge_core.Tensor(np.random.random(in_dim).astype(np.float32)) for in_dim in in_dims]
-    in_tensor = [normalize_random_tensor(np.random.rand(*in_dim)) for in_dim in in_dims]
+    in_tensor = [aidge_core.Tensor(np_init(in_dim)) for in_dim in in_dims]
 
     scheduler.forward(data=in_tensor)
 
@@ -198,69 +231,69 @@ class test_operator_export(unittest.TestCase):
     def test_export_add(self):
         print("Add")
         model = aidge_core.sequential([
-            aidge_core.Producer([1, 10]),
-            aidge_core.Add()
+            aidge_core.Producer([1, 5, 5], name="producer"),
+            aidge_core.Add(name="add")
         ])
 
-        self.assertTrue(unit_test_export(model, "Add", [[1, 10]]))
+        self.assertTrue(unit_test_export(model, "Add", [[1, 5, 5]]))
 
     def test_export_sub(self):
         print("Sub")
         model = aidge_core.sequential([
-            aidge_core.Producer([1, 10]),
-            aidge_core.Sub()
+            aidge_core.Producer([1, 5, 5], name="producer"),
+            aidge_core.Sub(name="sub")
         ])
 
-        self.assertTrue(unit_test_export(model, "Sub", [[1, 10]]))
+        self.assertTrue(unit_test_export(model, "Sub", [[1, 5, 5]]))
 
     def test_export_mul(self):
         print("Mul")
         model = aidge_core.sequential([
-            aidge_core.Producer([1, 10]),
-            aidge_core.Mul()
+            aidge_core.Producer([1, 5, 5], name="producer"),
+            aidge_core.Mul(name="mul")
         ])
 
-        self.assertTrue(unit_test_export(model, "Mul", [[1, 10]]))
+        self.assertTrue(unit_test_export(model, "Mul", [[1, 5, 5]]))
 
     def test_export_conv2D(self):
         print("Conv2D")
         model = aidge_core.sequential([
-            aidge_core.Conv2D(in_channels=3, out_channels=3, kernel_dims=(3, 3))
+            aidge_core.Conv2D(in_channels=3, out_channels=3, kernel_dims=(3, 3), name="conv")
         ])
 
         self.assertTrue(unit_test_export(model, "Conv2D", [[1, 3, 12, 12]]))
 
-    # def test_export_max_pooling(self):
-    #     print("MaxPooling2D")
-    #     model = aidge_core.sequential([
-    #         aidge_core.MaxPooling2D(kernel_dims=(3, 3))
-    #     ])
+    def test_export_max_pooling(self):
+        print("MaxPooling2D")
+        model = aidge_core.sequential([
+            aidge_core.MaxPooling2D(kernel_dims=(3, 3), name="max_pool")
+        ])
 
-    #     self.assertTrue(unit_test_export(model, "MaxPooling2D", [[1, 2, 12, 12]]))
+        self.assertTrue(unit_test_export(model, "MaxPooling2D", [[1, 2, 12, 12]]))
 
-    # def test_export_avg_pooling(self):
-    #     print("AvgPooling2D")
-    #     model = aidge_core.sequential([
-    #         aidge_core.AvgPooling2D(kernel_dims=(3, 3), name="avg_pool0")
-    #     ])
+    def test_export_avg_pooling(self):
+        print("AvgPooling2D")
+        model = aidge_core.sequential([
+            aidge_core.AvgPooling2D(kernel_dims=(3, 3), name="avg_pool")
+        ])
 
-    #     self.assertTrue(unit_test_export(model, "AvgPooling2D", [[1, 2, 12, 12]]))
+        self.assertTrue(unit_test_export(model, "AvgPooling2D", [[1, 2, 12, 12]]))
 
-    # def test_export_pad2D(self):
-    #     print("Pad2D")
-    #     model = aidge_core.sequential([
-    #         aidge_core.Softmax(axis=1, name="sf0")
-    #     ])
+    def test_export_pad2D(self):
+        print("Pad2D")
+        model = aidge_core.sequential([
+            aidge_core.Pad2D((1, 1, 1, 1), name="pad2d")
+        ])
 
-    #     self.assertTrue(unit_test_export(model, "Softmax", [[1, 10]]))
+        self.assertTrue(unit_test_export(model, "Pad2D", [[1, 3, 10, 10]]))
 
-    # def test_export_batchnorm2D(self):
-    #     print("BatchNormalization2D")
-    #     model = aidge_core.sequential([
-    #         aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5)
-    #     ])
+    def test_export_batchnorm2D(self):
+        print("BatchNormalization2D")
+        model = aidge_core.sequential([
+            aidge_core.BatchNorm2D(nb_features=10, epsilon=2e-5, name="bn")
+        ])
 
-    #     self.assertTrue(unit_test_export(model, "BatchNorm2D", [[1, 10]]))
+        self.assertTrue(unit_test_export(model, "BatchNorm2D", [[1, 10, 5, 5]]))
 
     def test_export_cpp(self):
         print("Export test to do")
--
GitLab