From bc61fe4ea04db215720089f8d5533f04bce5044e Mon Sep 17 00:00:00 2001
From: NAUD Maxence <maxence.naud@cea.fr>
Date: Fri, 19 Apr 2024 14:01:41 +0000
Subject: [PATCH] Fix MetaOperator test

---
 unit_tests/operator/Test_MetaOperator.cpp | 10 ++++--
 unit_tests/operator/Test_MetaOperator.py  | 42 -----------------------
 2 files changed, 7 insertions(+), 45 deletions(-)
 delete mode 100644 unit_tests/operator/Test_MetaOperator.py

diff --git a/unit_tests/operator/Test_MetaOperator.cpp b/unit_tests/operator/Test_MetaOperator.cpp
index b15074d1b..ed4afafe3 100644
--- a/unit_tests/operator/Test_MetaOperator.cpp
+++ b/unit_tests/operator/Test_MetaOperator.cpp
@@ -9,6 +9,12 @@
  *
  ********************************************************************************/
 
+#include <cstddef> // std::size_t
+#include <memory>
+#include <string>
+#include <utility> // std::pair
+#include <vector>
+
 #include <catch2/catch_test_macros.hpp>
 
 #include "aidge/operator/Pop.hpp"
@@ -17,7 +23,6 @@
 #include "aidge/graph/GraphView.hpp"
 #include "aidge/graph/Testing.hpp"
 #include "aidge/recipes/Recipes.hpp"
-#include <cstddef>
 
 using namespace Aidge;
 
@@ -37,8 +42,7 @@ TEST_CASE("[core/operators] MetaOperator", "[Operator][MetaOperator]") {
         REQUIRE(op->nbData() == 1);
         REQUIRE(op->nbOutputs() == 1);
 
-        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>();
-        myInput->resize({2,3,5,5});
+        std::shared_ptr<Tensor> myInput = std::make_shared<Tensor>(std::vector<std::size_t>({2,1,5,5}));
         std::shared_ptr<OperatorTensor> opTensor = std::static_pointer_cast<OperatorTensor>(op->getOperator());
         opTensor->associateInput(0,myInput);
         opTensor->forwardDims();
diff --git a/unit_tests/operator/Test_MetaOperator.py b/unit_tests/operator/Test_MetaOperator.py
deleted file mode 100644
index a525c94b9..000000000
--- a/unit_tests/operator/Test_MetaOperator.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import onnx
-from onnx.backend.test.case.node.lstm import LSTMHelper
-from onnx.backend.test.case.node import expect
-import numpy as np
-
-input = np.array([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[2.0, 3.0], [4.0, 5.0], [6.0, 7.0]]]).astype(np.float32)
-print(input.shape)
-input_size = 2
-hidden_size = 3
-weight_scale = 0.1
-number_of_gates = 4
-
-node = onnx.helper.make_node(
-    "LSTM", inputs=["X", "W", "R"], outputs=["", "Y_h"], hidden_size=hidden_size
-)
-
-W = weight_scale * np.ones(
-    (1, number_of_gates * hidden_size, input_size)
-).astype(np.float32)
-R = weight_scale * np.ones(
-    (1, number_of_gates * hidden_size, hidden_size)
-).astype(np.float32)
-
-lstm = LSTMHelper(X=input, W=W, R=R)
-_, Y_h = lstm.step()
-
-print(lstm.C_0 )
-
-seq_length = input.shape[0]
-batch_size = input.shape[1]
-
-print(seq_length)
-print(np.split(input, input.shape[0], axis=0))
-
-expect(
-    node,
-    inputs=[input, W, R],
-    outputs=[Y_h.astype(np.float32)],
-    name="test_lstm_defaults",
-)
-
-print(Y_h)
\ No newline at end of file
--
GitLab
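
For readers outside the Aidge code base: the functional change in the C++ test is that the input Tensor is now built with its dimensions at construction time instead of being default-constructed and then resized. The fragment below is a minimal sketch of that pattern, not part of the patch; the two aidge header paths are assumptions (they are not shown in this diff), while every API call used here appears verbatim in the hunks above.

// Illustrative sketch only -- not part of the patch. Header paths are assumed;
// the calls mirror those visible in the diff (Tensor dims-constructor,
// associateInput, forwardDims).
#include <cstddef>  // std::size_t
#include <memory>   // std::shared_ptr, std::make_shared
#include <vector>

#include "aidge/data/Tensor.hpp"              // assumed path for Aidge::Tensor
#include "aidge/operator/OperatorTensor.hpp"  // assumed path for Aidge::OperatorTensor

// Attach a dummy input of shape {2, 1, 5, 5} to an operator and propagate its
// dimensions, following the pattern the fixed test now uses.
void feedDummyInput(const std::shared_ptr<Aidge::OperatorTensor>& opTensor) {
    // New style: the Tensor is created with its dimensions up front...
    auto input = std::make_shared<Aidge::Tensor>(std::vector<std::size_t>({2, 1, 5, 5}));

    // ...replacing the old default-construct-then-resize sequence:
    //   auto input = std::make_shared<Aidge::Tensor>();
    //   input->resize({2, 3, 5, 5});

    opTensor->associateInput(0, input);  // slot 0 is the data input in the test
    opTensor->forwardDims();             // propagate shapes through the operator
}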