"""TOSA verification runner script."""
# Copyright (c) 2020-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
import glob
import importlib
import os
import queue
import threading
import traceback
from datetime import datetime
from pathlib import Path

from json2numpy import json2numpy
from runner.tosa_test_runner import TosaTestInvalid
from runner.tosa_test_runner import TosaTestRunner
from xunit import xunit

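# Default system under test module (the TOSA reference model) and the maximum
# length of result message recorded per test in the xunit output.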
TOSA_REFMODEL_RUNNER = "runner.tosa_refmodel_sut_run"
MAX_XUNIT_TEST_MESSAGE = 1000


def parseArgs(argv):
    """Parse the arguments and return the settings."""
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        "-t",
        "--test",
        dest="test",
        type=str,
        nargs="+",
        help="Test(s) to run",
    )
    group.add_argument(
        "-T",
        "--test-list",
        dest="test_list_file",
        type=Path,
        help="File containing list of tests to run (one per line)",
    )
    parser.add_argument(
        "--operator-fbs",
        dest="operator_fbs",
        default="conformance_tests/third_party/serialization_lib/schema/tosa.fbs",
        type=str,
        help="FlatBuffers schema file for TOSA (tosa.fbs)",
    )
    parser.add_argument(
        "--ref-model-path",
        dest="ref_model_path",
        default="reference_model/build/reference_model/tosa_reference_model",
        type=str,
        help="Path to reference model executable",
    )
    parser.add_argument(
        "--flatc-path",
        dest="flatc_path",
        default="reference_model/build/thirdparty/serialization_lib/third_party/flatbuffers/flatc",
        type=str,
        help="Path to flatc compiler executable",
    )
    parser.add_argument(
        "--ref-debug",
        dest="ref_debug",
        default="",
        type=str,
        help="Reference debug flag (low, med, high)",
    )
    parser.add_argument(
        "--ref-intermediates",
        dest="ref_intermediates",
        default=0,
        type=int,
        help="Reference model dumps intermediate tensors",
    )
    parser.add_argument(
        "-b",
        "--binary",
        dest="binary",
        action="store_true",
        help="Convert to using binary flatbuffers instead of JSON",
    )
    parser.add_argument(
        "-v", "--verbose", dest="verbose", action="count", help="Verbose operation"
    )
    parser.add_argument(
        "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
    )
    parser.add_argument(
        "--sut-module",
        "-s",
        dest="sut_module",
        type=str,
        nargs="+",
        default=[TOSA_REFMODEL_RUNNER],
        help="System under test module to load (derives from TosaTestRunner). May be repeated",
    )
    parser.add_argument(
        "--sut-module-args",
        dest="sut_module_args",
        type=str,
        nargs="+",
        default=[],
        help="System under test module arguments. Use sutmodulename:argvalue to pass an argument. May be repeated.",
    )
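    # The sut-module-args convention: each value is "<module name>:<argument>",
    # e.g. "runner.tosa_refmodel_sut_run:--some-flag=value" (flag shown is
    # illustrative only); the argument is handed to the matching runner module.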
    parser.add_argument(
        "--xunit-file",
        dest="xunit_file",
        type=str,
        default="result.xml",
        help="XUnit output file",
    )
    parser.add_argument(
        "--test-type",
        dest="test_type",
        type=str,
        default="both",
        choices=["positive", "negative", "both"],
        help="Filter tests based on expected failure status (positive, negative or both)",
    )
    parser.add_argument(
        "--no-color",
        "--no-colour",
        dest="no_color",
        action="store_true",
        help="Disable color output",
    )

    args = parser.parse_args(argv)

    # Autodetect CPU count
    if args.jobs <= 0:
        args.jobs = os.cpu_count()

    return args


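# Prefixes of JSON files (test/model definitions and descriptors) that are not
# tensor data; convert2Numpy skips any file whose name starts with one of these.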
EXCLUSION_PREFIX = ["test", "model", "desc"]


def convert2Numpy(testDir):
    """Convert all the JSON numpy files back into binary numpy."""
    jsons = glob.glob(os.path.join(testDir, "*.json"))
    for json in jsons:
        for exclude in EXCLUSION_PREFIX:
            if os.path.basename(json).startswith(exclude):
                json = ""
        if json:
            # debug print("Converting " + json)
            json2numpy.json_to_npy(Path(json))


def workerThread(task_queue, runnerList, args, result_queue):
    """Worker thread that runs the next test from the queue."""
    while True:
        try:
            test = task_queue.get(block=False)
        except queue.Empty:
            break

        if test is None:
            break

        msg = ""
        converted = False
        for runnerModule, runnerArgs in runnerList:
            try:
                start_time = datetime.now()
                # Set up system under test runner
                runnerName = runnerModule.__name__
                runner = runnerModule.TosaSUTRunner(args, runnerArgs, test)

                if runner.skipTest():
                    msg = "Skipping non-{} test".format(args.test_type)
                    print("{} {}".format(msg, test))
                    rc = TosaTestRunner.Result.SKIPPED
                else:
                    # Convert JSON data files into numpy format on first pass
                    if not converted:
                        convert2Numpy(test)
                        converted = True

                    if args.verbose:
                        print("Running runner {} with test {}".format(runnerName, test))
                    try:
                        grc, gmsg = runner.runTestGraph()
                        rc, msg = runner.testResult(grc, gmsg)
                    except Exception as e:
                        msg = "System Under Test error: {}".format(e)
                        print(msg)
                        print(
                            "".join(
                                traceback.format_exception(
                                    etype=type(e), value=e, tb=e.__traceback__
                                )
                            )
                        )
                        rc = TosaTestRunner.Result.INTERNAL_ERROR
            except Exception as e:
                msg = "Internal error: {}".format(e)
                print(msg)
                if not isinstance(e, TosaTestInvalid):
                    # Show stack trace on unexpected exceptions
                    print(
                        "".join(
                            traceback.format_exception(
                                etype=type(e), value=e, tb=e.__traceback__
                            )
                        )
                    )
                rc = TosaTestRunner.Result.INTERNAL_ERROR
            finally:
                end_time = datetime.now()
                result_queue.put((runnerName, test, rc, msg, end_time - start_time))

        task_queue.task_done()

    return True


def loadSUTRunnerModules(args):
    """Load in the system under test modules.

    Returns a list of tuples of (runner_module, [argument list])
    """
    runnerList = []
    # Remove any duplicates from the list
    sut_module_list = list(set(args.sut_module))
    for r in sut_module_list:
        if args.verbose:
            print("Loading module {}".format(r))

        runner = importlib.import_module(r)

        # Look for arguments associated with this runner
        runnerArgPrefix = "{}:".format(r)
        runnerArgList = []
        for a in args.sut_module_args:
            if a.startswith(runnerArgPrefix):
                runnerArgList.append(a[len(runnerArgPrefix) :])
        runnerList.append((runner, runnerArgList))

    return runnerList


def createXUnitResults(xunitFile, runnerList, resultLists, verbose):
    """Create the xunit results file."""
    xunit_result = xunit.xunit_results()

    for runnerModule, _ in runnerList:
        # Create test suite per system under test (runner)
        runner = runnerModule.__name__
        xunit_suite = xunit_result.create_suite(runner)

        # Sort by test name
        for test, rc, msg, time_delta in sorted(
            resultLists[runner], key=lambda tup: tup[0]
        ):
            test_name = test
            xt = xunit.xunit_test(test_name, runner)

            xt.time = str(
                float(time_delta.seconds) + (float(time_delta.microseconds) * 1e-6)
            )

            testMsg = rc.name if not msg else "{}: {}".format(rc.name, msg)

            if (
                rc == TosaTestRunner.Result.EXPECTED_PASS
                or rc == TosaTestRunner.Result.EXPECTED_FAILURE
            ):
                if verbose:
                    print("{} {} ({})".format(rc.name, test_name, runner))
            elif rc == TosaTestRunner.Result.SKIPPED:
                xt.skipped()
                if verbose:
                    print("{} {} ({})".format(rc.name, test_name, runner))
            else:
                xt.failed(testMsg)
                print("{} {} ({})".format(rc.name, test_name, runner))

            xunit_suite.tests.append(xt)

    xunit_result.write_results(xunitFile)


def main(argv=None):
    """Start worker threads to do the testing and output the results."""
    args = parseArgs(argv)

    if TOSA_REFMODEL_RUNNER in args.sut_module and not os.path.isfile(
        args.ref_model_path
    ):
        print(
            "Argument error: Reference Model not found ({})".format(args.ref_model_path)
        )
        exit(2)

    if args.test_list_file:
        try:
            with open(args.test_list_file) as f:
                args.test = f.read().splitlines()
        except Exception as e:
            print(
                "Argument error: Cannot read list of tests in {}\n{}".format(
                    args.test_list_file, e
                )
            )
            exit(2)

    runnerList = loadSUTRunnerModules(args)

    threads = []
    taskQueue = queue.Queue()
    resultQueue = queue.Queue()

    for t in args.test:
        if os.path.isfile(t):
            # Plain files are not runnable tests; stray README files are skipped silently
            if os.path.basename(t) != "README":
                print("Warning: Skipping test {} as it is not a valid directory".format(t))
        else:
            taskQueue.put(t)

    print(
        "Running {} tests on {} system{} under test".format(
            taskQueue.qsize(), len(runnerList), "s" if len(runnerList) > 1 else ""
        )
    )

    for _ in range(args.jobs):
        t = threading.Thread(
            target=workerThread, args=(taskQueue, runnerList, args, resultQueue)
        )
        t.daemon = True
        t.start()
        threads.append(t)

    taskQueue.join()

    # Set up results lists for each system under test
    resultLists = {}
    results = {}
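    # results[runner] tallies outcomes per TosaTestRunner.Result; the enum value
    # is used directly as the list index when counting below.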
    for runnerModule, _ in runnerList:
        runner = runnerModule.__name__
        resultLists[runner] = []
        results[runner] = [0] * len(TosaTestRunner.Result)

    while True:
        try:
            runner, test, rc, msg, time_delta = resultQueue.get(block=False)
            resultQueue.task_done()
        except queue.Empty:
            break

        # Limit error messages to make results easier to digest
        if msg and len(msg) > MAX_XUNIT_TEST_MESSAGE:
            half = int(MAX_XUNIT_TEST_MESSAGE / 2)
            trimmed = len(msg) - MAX_XUNIT_TEST_MESSAGE
            msg = "{} ...\nskipped {} bytes\n... {}".format(
                msg[:half], trimmed, msg[-half:]
            )
        resultLists[runner].append((test, rc, msg, time_delta))
        results[runner][rc] += 1

    createXUnitResults(args.xunit_file, runnerList, resultLists, args.verbose)

    # Print out results for each system under test
    for runnerModule, _ in runnerList:
        runner = runnerModule.__name__
        resultSummary = []
        for result in TosaTestRunner.Result:
            resultSummary.append(
                "{} {}".format(results[runner][result], result.name.lower())
            )
        print("Totals ({}): {}".format(runner, ", ".join(resultSummary)))

    return 0


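# Typical invocation (values illustrative): supply one or more test directories
# with -t (or a list file with -T), optionally -j <jobs>, --test-type and one or
# more --sut-module runners.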
if __name__ == "__main__":
    exit(main())